mm: hugetlb: fix __unmap_hugepage_range()
First, after flushing the TLB there is no need to rescan PTEs from the start of the range; the loop can resume from the current address. Second, before bailing out of the loop, the address is advanced one step so that the retry does not revisit the page that was just unmapped.

Signed-off-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 569f48b858
parent e4bd6a0248
@@ -2638,8 +2638,9 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 
 	tlb_start_vma(tlb, vma);
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+	address = start;
 again:
-	for (address = start; address < end; address += sz) {
+	for (; address < end; address += sz) {
 		ptep = huge_pte_offset(mm, address);
 		if (!ptep)
 			continue;
@@ -2686,6 +2687,7 @@ again:
 		page_remove_rmap(page);
 		force_flush = !__tlb_remove_page(tlb, page);
 		if (force_flush) {
+			address += sz;
 			spin_unlock(ptl);
 			break;
 		}
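
The two hunks combine into a single resume-after-flush pattern: keep the scan cursor live across the retry label, and step it past the page that triggered the flush before breaking out. Below is a minimal userspace C sketch of that pattern, not the kernel code itself; scan_one(), flush_batch(), PAGE_STEP and BATCH are hypothetical stand-ins for the hugetlb unmap machinery (__tlb_remove_page(), the TLB flush, and the huge page size sz).

/*
 * Minimal userspace sketch of the resume-after-flush pattern established
 * by this patch. All names here are hypothetical stand-ins, not kernel
 * interfaces.
 */
#include <stdio.h>
#include <stdbool.h>

#define PAGE_STEP 4UL		/* stand-in for the huge page size sz */
#define BATCH 3			/* force a "flush" every BATCH pages */

static int batched;

/* Queue one page for unmapping; returns true when the batch is full and
 * a flush is needed, like force_flush = !__tlb_remove_page(...). */
static bool scan_one(unsigned long address)
{
	printf("unmap page at %lu\n", address);
	return ++batched == BATCH;
}

static void flush_batch(void)		/* stand-in for the TLB flush */
{
	printf("-- flush batch --\n");
	batched = 0;
}

int main(void)
{
	unsigned long start = 0, end = 40;
	unsigned long address = start;	/* set once, before the retry label */
again:
	for (; address < end; address += PAGE_STEP) {
		if (scan_one(address)) {
			/* Second fix: step past the page just handled so
			 * the retry does not unmap it twice. */
			address += PAGE_STEP;
			break;
		}
	}
	if (address < end) {
		flush_batch();
		goto again;	/* first fix: resume from address, not start */
	}
	flush_batch();		/* drain whatever remains in the batch */
	return 0;
}

Run to completion, the sketch visits each page exactly once even though the scan is interrupted and retried several times; with the cursor initialization inside the for statement, as in the pre-patch code, every goto again would restart the scan from the beginning of the range.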