diff --git a/mm/mlock.c b/mm/mlock.c
index b562b5523a65..dfc6f1912176 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -365,8 +365,8 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
  * @start + PAGE_SIZE when no page could be added by the pte walk.
  */
 static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
-		struct vm_area_struct *vma, int zoneid, unsigned long start,
-		unsigned long end)
+		struct vm_area_struct *vma, struct zone *zone,
+		unsigned long start, unsigned long end)
 {
 	pte_t *pte;
 	spinlock_t *ptl;
@@ -394,7 +394,7 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
 		 * Break if page could not be obtained or the page's node+zone does not
 		 * match
 		 */
-		if (!page || page_zone_id(page) != zoneid)
+		if (!page || page_zone(page) != zone)
 			break;
 
 		/*
@@ -446,7 +446,6 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 		unsigned long page_increm;
 		struct pagevec pvec;
 		struct zone *zone;
-		int zoneid;
 
 		pagevec_init(&pvec, 0);
 		/*
@@ -481,7 +480,6 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 			 */
 			pagevec_add(&pvec, page);
 			zone = page_zone(page);
-			zoneid = page_zone_id(page);
 
 			/*
 			 * Try to fill the rest of pagevec using fast
@@ -490,7 +488,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 			 * pagevec.
 			 */
 			start = __munlock_pagevec_fill(&pvec, vma,
-					zoneid, start, end);
+					zone, start, end);
 			__munlock_pagevec(&pvec, zone);
 			goto next;
 		}