mirror of
https://github.com/torvalds/linux.git
synced 2024-11-23 20:51:44 +00:00
mm: add preempt points into __purge_vmap_area_lazy()
Use cond_resched_lock to avoid holding the vmap_area_lock for a potentially long time and thus creating bad latencies for various workloads. [hch: split from a larger patch by Joel, wrote the crappy changelog] Link: http://lkml.kernel.org/r/1479474236-4139-11-git-send-email-hch@lst.de Signed-off-by: Joel Fernandes <joelaf@google.com> Signed-off-by: Christoph Hellwig <hch@lst.de> Tested-by: Jisheng Zhang <jszhang@marvell.com> Cc: Andrey Ryabinin <aryabinin@virtuozzo.com> Cc: Chris Wilson <chris@chris-wilson.co.uk> Cc: John Dias <joaodias@google.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Ingo Molnar <mingo@elte.hu> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
f9e0997767
commit
763b218ddf
14
mm/vmalloc.c
14
mm/vmalloc.c
@@ -628,7 +628,7 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
|
|||||||
struct llist_node *valist;
|
struct llist_node *valist;
|
||||||
struct vmap_area *va;
|
struct vmap_area *va;
|
||||||
struct vmap_area *n_va;
|
struct vmap_area *n_va;
|
||||||
int nr = 0;
|
bool do_free = false;
|
||||||
|
|
||||||
lockdep_assert_held(&vmap_purge_lock);
|
lockdep_assert_held(&vmap_purge_lock);
|
||||||
|
|
||||||
@@ -638,18 +638,22 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
|
|||||||
start = va->va_start;
|
start = va->va_start;
|
||||||
if (va->va_end > end)
|
if (va->va_end > end)
|
||||||
end = va->va_end;
|
end = va->va_end;
|
||||||
nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
|
do_free = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!nr)
|
if (!do_free)
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
atomic_sub(nr, &vmap_lazy_nr);
|
|
||||||
flush_tlb_kernel_range(start, end);
|
flush_tlb_kernel_range(start, end);
|
||||||
|
|
||||||
spin_lock(&vmap_area_lock);
|
spin_lock(&vmap_area_lock);
|
||||||
llist_for_each_entry_safe(va, n_va, valist, purge_list)
|
llist_for_each_entry_safe(va, n_va, valist, purge_list) {
|
||||||
|
int nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
|
||||||
|
|
||||||
__free_vmap_area(va);
|
__free_vmap_area(va);
|
||||||
|
atomic_sub(nr, &vmap_lazy_nr);
|
||||||
|
cond_resched_lock(&vmap_area_lock);
|
||||||
|
}
|
||||||
spin_unlock(&vmap_area_lock);
|
spin_unlock(&vmap_area_lock);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
Loading…
Reference in New Issue
Block a user