mm, slub: move irq control into unfreeze_partials()
unfreeze_partials() can be optimized so that it doesn't need irqs disabled for the whole time. As the first step, move irq control into the function and remove it from the put_cpu_partial() caller.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
This commit changes one file: mm/slub.c (13 changed lines).
@@ -2350,9 +2350,8 @@ redo:
 /*
  * Unfreeze all the cpu partial slabs.
  *
- * This function must be called with interrupts disabled
- * for the cpu using c (or some other guarantee must be there
- * to guarantee no concurrent accesses).
+ * This function must be called with preemption or migration
+ * disabled with c local to the cpu.
  */
 static void unfreeze_partials(struct kmem_cache *s,
 		struct kmem_cache_cpu *c)
@@ -2360,6 +2359,9 @@ static void unfreeze_partials(struct kmem_cache *s,
 #ifdef CONFIG_SLUB_CPU_PARTIAL
 	struct kmem_cache_node *n = NULL, *n2 = NULL;
 	struct page *page, *discard_page = NULL;
+	unsigned long flags;
+
+	local_irq_save(flags);
 
 	while ((page = slub_percpu_partial(c))) {
 		struct page new;
@@ -2412,6 +2414,8 @@ static void unfreeze_partials(struct kmem_cache *s,
 		discard_slab(s, page);
 		stat(s, FREE_SLAB);
 	}
+
+	local_irq_restore(flags);
 #endif	/* CONFIG_SLUB_CPU_PARTIAL */
 }
@@ -2439,14 +2443,11 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 			pobjects = oldpage->pobjects;
 			pages = oldpage->pages;
 			if (drain && pobjects > slub_cpu_partial(s)) {
-				unsigned long flags;
 				/*
 				 * partial array is full. Move the existing
 				 * set to the per node partial list.
 				 */
-				local_irq_save(flags);
 				unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
-				local_irq_restore(flags);
 				oldpage = NULL;
 				pobjects = 0;
 				pages = 0;
Reference in New Issue
Block a user