forked from Minki/linux
mm,hugetlb: take hugetlb_lock before decrementing h->resv_huge_pages
The h->*_huge_pages counters are protected by the hugetlb_lock, but
alloc_huge_page has a corner case where it can decrement the counter
outside of the lock.
This could lead to a corrupted value of h->resv_huge_pages, which we have
observed on our systems.
Take the hugetlb_lock before decrementing h->resv_huge_pages to avoid a
potential race.
Link: https://lkml.kernel.org/r/20221017202505.0e6a4fcd@imladris.surriel.com
Fixes: a88c769548 ("mm: hugetlb: fix hugepage memory leak caused by wrong reserve count")
Signed-off-by: Rik van Riel <riel@surriel.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Glen McCready <gkmccready@meta.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit: 12df140f0b
Parent commit: a57b70519d
@@ -2924,11 +2924,11 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 	page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
 	if (!page)
 		goto out_uncharge_cgroup;
+	spin_lock_irq(&hugetlb_lock);
 	if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
 		SetHPageRestoreReserve(page);
 		h->resv_huge_pages--;
 	}
-	spin_lock_irq(&hugetlb_lock);
 	list_add(&page->lru, &h->hugepage_activelist);
 	set_page_refcounted(page);
 	/* Fall through */
Reference in New Issue
Block a user