mm/hugetlb: increase use of folios in alloc_huge_page()

Convert hugetlb_cgroup_commit_charge{,_rsvd}(), dequeue_huge_page_vma() and
alloc_buddy_huge_page_with_mpol() to take and return folios, so that
alloc_huge_page() operates on folios throughout and converts back to a
struct page only at its return.
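
In outline, the allocation path changes from page-based to folio-based
handling; a minimal before/after sketch distilled from the diff below
(locking and error handling elided):

	/* Before: helpers returned struct page, and the folio had to be
	 * derived after the fact with page_folio(). */
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
	folio = page_folio(page);
	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);

	/* After: helpers hand back a struct folio directly; the only
	 * remaining page conversion is at alloc_huge_page()'s return. */
	folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
	return &folio->page;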

Link: https://lkml.kernel.org/r/20230113223057.173292-6-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Author:    Sidhartha Kumar, 2023-01-13 16:30:54 -06:00
Committer: Andrew Morton
Commit:    ff7d853b03
Parent:    3a740e8bb5
3 changed files with 22 additions and 27 deletions

--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -141,10 +141,10 @@ extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
 					struct hugetlb_cgroup **ptr);
 extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 					 struct hugetlb_cgroup *h_cg,
-					 struct page *page);
+					 struct folio *folio);
 extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
 					       struct hugetlb_cgroup *h_cg,
-					       struct page *page);
+					       struct folio *folio);
 extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
 					  struct folio *folio);
 extern void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
@@ -230,14 +230,14 @@ static inline int hugetlb_cgroup_charge_cgroup_rsvd(int idx,
 
 static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 						struct hugetlb_cgroup *h_cg,
-						struct page *page)
+						struct folio *folio)
 {
 }
 
 static inline void
 hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
 				  struct hugetlb_cgroup *h_cg,
-				  struct page *page)
+				  struct folio *folio)
 {
 }
 

--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1348,7 +1348,7 @@ static unsigned long available_huge_pages(struct hstate *h)
 	return h->free_huge_pages - h->resv_huge_pages;
 }
 
-static struct page *dequeue_huge_page_vma(struct hstate *h,
+static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
 				struct vm_area_struct *vma,
 				unsigned long address, int avoid_reserve,
 				long chg)
@@ -1392,7 +1392,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
 	}
 
 	mpol_cond_put(mpol);
-	return &folio->page;
+	return folio;
 
 err:
 	return NULL;
@@ -2446,7 +2446,7 @@ static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
  * Use the VMA's mpolicy to allocate a huge page from the buddy.
  */
 static
-struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
+struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
 		struct vm_area_struct *vma, unsigned long addr)
 {
 	struct folio *folio = NULL;
@@ -2469,7 +2469,7 @@ struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
 	if (!folio)
 		folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
 	mpol_cond_put(mpol);
-	return &folio->page;
+	return folio;
 }
 
 /* page migration callback function */
@@ -3018,7 +3018,6 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 {
 	struct hugepage_subpool *spool = subpool_vma(vma);
 	struct hstate *h = hstate_vma(vma);
-	struct page *page;
 	struct folio *folio;
 	long map_chg, map_commit;
 	long gbl_chg;
@@ -3082,34 +3081,34 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 	 * from the global free pool (global change). gbl_chg == 0 indicates
 	 * a reservation exists for the allocation.
 	 */
-	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
-	if (!page) {
+	folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
+	if (!folio) {
 		spin_unlock_irq(&hugetlb_lock);
-		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
-		if (!page)
+		folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
+		if (!folio)
 			goto out_uncharge_cgroup;
 		spin_lock_irq(&hugetlb_lock);
 		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
-			SetHPageRestoreReserve(page);
+			folio_set_hugetlb_restore_reserve(folio);
 			h->resv_huge_pages--;
 		}
-		list_add(&page->lru, &h->hugepage_activelist);
-		set_page_refcounted(page);
+		list_add(&folio->lru, &h->hugepage_activelist);
+		folio_ref_unfreeze(folio, 1);
 		/* Fall through */
 	}
-	folio = page_folio(page);
-	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
+
+	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
 	/* If allocation is not consuming a reservation, also store the
 	 * hugetlb_cgroup pointer on the page.
 	 */
 	if (deferred_reserve) {
 		hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
-						  h_cg, page);
+						  h_cg, folio);
 	}
 
 	spin_unlock_irq(&hugetlb_lock);
 
-	hugetlb_set_page_subpool(page, spool);
+	hugetlb_set_folio_subpool(folio, spool);
 
 	map_commit = vma_commit_reservation(h, vma, addr);
 	if (unlikely(map_chg > map_commit)) {
@@ -3130,7 +3129,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 		hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
 				pages_per_huge_page(h), folio);
 	}
-	return page;
+	return &folio->page;
 
 out_uncharge_cgroup:
 	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);

--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -331,19 +331,15 @@ static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 
 void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 				  struct hugetlb_cgroup *h_cg,
-				  struct page *page)
+				  struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
-
 	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, false);
 }
 
 void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
 				       struct hugetlb_cgroup *h_cg,
-				       struct page *page)
+				       struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
-
 	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, true);
 }
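
After this change, both commit-charge helpers reduce to thin wrappers with
the page_folio() conversion gone; a caller that already holds a folio, as
alloc_huge_page() now does, passes it straight through. The resulting shape,
reconstructed from the hunk above:

	void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
					  struct hugetlb_cgroup *h_cg,
					  struct folio *folio)
	{
		/* Charge the folio to the cgroup; no page -> folio
		 * conversion needed any more. */
		__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, false);
	}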