mirror of
https://github.com/torvalds/linux.git
synced 2024-11-22 20:22:09 +00:00
mm,hugetlb: make unmap_ref_private() return void
This function always returns 1, thus no need to check return value in hugetlb_cow(). By doing so, we can get rid of the unnecessary WARN_ON call. While this logic perhaps existed as a way of identifying future unmap_ref_private() mishandling, reality is it serves no apparent purpose. Signed-off-by: Davidlohr Bueso <davidlohr@hp.com> Cc: Aswin Chandramouleeswaran <aswin@hp.com> Acked-by: David Rientjes <rientjes@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
eb39d618f9
commit
2f4612af43
32
mm/hugetlb.c
32
mm/hugetlb.c
@@ -2754,8 +2754,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
|
||||
* from other VMAs and let the children be SIGKILLed if they are faulting the
|
||||
* same region.
|
||||
*/
|
||||
static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
|
||||
struct page *page, unsigned long address)
|
||||
static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
|
||||
struct page *page, unsigned long address)
|
||||
{
|
||||
struct hstate *h = hstate_vma(vma);
|
||||
struct vm_area_struct *iter_vma;
|
||||
@@ -2794,8 +2794,6 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
|
||||
address + huge_page_size(h), page);
|
||||
}
|
||||
mutex_unlock(&mapping->i_mmap_mutex);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -2857,20 +2855,18 @@ retry_avoidcopy:
|
||||
*/
|
||||
if (outside_reserve) {
|
||||
BUG_ON(huge_pte_none(pte));
|
||||
if (unmap_ref_private(mm, vma, old_page, address)) {
|
||||
BUG_ON(huge_pte_none(pte));
|
||||
spin_lock(ptl);
|
||||
ptep = huge_pte_offset(mm, address & huge_page_mask(h));
|
||||
if (likely(ptep &&
|
||||
pte_same(huge_ptep_get(ptep), pte)))
|
||||
goto retry_avoidcopy;
|
||||
/*
|
||||
* race occurs while re-acquiring page table
|
||||
* lock, and our job is done.
|
||||
*/
|
||||
return 0;
|
||||
}
|
||||
WARN_ON_ONCE(1);
|
||||
unmap_ref_private(mm, vma, old_page, address);
|
||||
BUG_ON(huge_pte_none(pte));
|
||||
spin_lock(ptl);
|
||||
ptep = huge_pte_offset(mm, address & huge_page_mask(h));
|
||||
if (likely(ptep &&
|
||||
pte_same(huge_ptep_get(ptep), pte)))
|
||||
goto retry_avoidcopy;
|
||||
/*
|
||||
* race occurs while re-acquiring page table
|
||||
* lock, and our job is done.
|
||||
*/
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Caller expects lock to be held */
|
||||
|
Loading…
Reference in New Issue
Block a user