mirror of https://github.com/torvalds/linux.git (synced 2024-11-22 12:11:40 +00:00)
mm/hugetlb: convert alloc_migrate_huge_page to folios
Change alloc_huge_page_nodemask() to alloc_hugetlb_folio_nodemask() and
alloc_migrate_huge_page() to alloc_migrate_hugetlb_folio(). Both functions
now return a folio rather than a page.

Link: https://lkml.kernel.org/r/20230113223057.173292-7-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent ff7d853b03
commit e37d3e838d
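To illustrate the caller-visible change, here is a minimal sketch (not part
of the patch; migrate_hugetlb_page_to_node() is an invented wrapper, while
htlb_alloc_mask() and alloc_hugetlb_folio_nodemask() are the real
interfaces): the renamed allocator now hands back a struct folio, and
callers that still traffic in pages unwrap it at the boundary.

	/* Hypothetical caller, for illustration only. */
	static struct page *migrate_hugetlb_page_to_node(struct hstate *h,
							 int nid, nodemask_t *nmask)
	{
		gfp_t gfp_mask = htlb_alloc_mask(h);
		struct folio *folio;

		/* The renamed allocator returns a folio, not a page. */
		folio = alloc_hugetlb_folio_nodemask(h, nid, nmask, gfp_mask);
		if (!folio)
			return NULL;

		/* Unwrap only where a struct page is still required. */
		return &folio->page;
	}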
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -719,7 +719,7 @@ struct huge_bootmem_page {
 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
 struct page *alloc_huge_page(struct vm_area_struct *vma,
 				unsigned long addr, int avoid_reserve);
-struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
+struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
 				nodemask_t *nmask, gfp_t gfp_mask);
 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
 				unsigned long address);
@@ -1040,8 +1040,8 @@ static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
 	return NULL;
 }
 
-static inline struct page *
-alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
+static inline struct folio *
+alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
 			nodemask_t *nmask, gfp_t gfp_mask)
 {
 	return NULL;
 mm/hugetlb.c | 18

--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2419,7 +2419,7 @@ out_unlock:
 	return folio;
 }
 
-static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
+static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
 	int nid, nodemask_t *nmask)
 {
 	struct folio *folio;
@@ -2439,7 +2439,7 @@ static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
 	 */
 	folio_set_hugetlb_temporary(folio);
 
-	return &folio->page;
+	return folio;
 }
 
 /*
@@ -2472,8 +2472,8 @@ struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
 	return folio;
 }
 
-/* page migration callback function */
-struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
+/* folio migration callback function */
+struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
 		nodemask_t *nmask, gfp_t gfp_mask)
 {
 	spin_lock_irq(&hugetlb_lock);
@@ -2484,12 +2484,12 @@ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
 				preferred_nid, nmask);
 		if (folio) {
 			spin_unlock_irq(&hugetlb_lock);
-			return &folio->page;
+			return folio;
 		}
 	}
 	spin_unlock_irq(&hugetlb_lock);
 
-	return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
+	return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask);
 }
 
 /* mempolicy aware migration callback */
@@ -2498,16 +2498,16 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
 {
 	struct mempolicy *mpol;
 	nodemask_t *nodemask;
-	struct page *page;
+	struct folio *folio;
 	gfp_t gfp_mask;
 	int node;
 
 	gfp_mask = htlb_alloc_mask(h);
 	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
-	page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
+	folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask);
 	mpol_cond_put(mpol);
 
-	return page;
+	return &folio->page;
 }
 
 /*
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1663,6 +1663,7 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
 	struct migration_target_control *mtc;
 	gfp_t gfp_mask;
 	unsigned int order = 0;
+	struct folio *hugetlb_folio = NULL;
 	struct folio *new_folio = NULL;
 	int nid;
 	int zidx;
@@ -1677,7 +1678,9 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
 		struct hstate *h = folio_hstate(folio);
 
 		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
-		return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
+		hugetlb_folio = alloc_hugetlb_folio_nodemask(h, nid,
+						mtc->nmask, gfp_mask);
+		return &hugetlb_folio->page;
 	}
 
 	if (folio_test_large(folio)) {
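Note that the boundaries which still return struct page after this patch,
alloc_huge_page_vma() and alloc_migration_target(), unwrap the result with
&folio->page without checking for NULL first. A minimal sketch of that
pattern (the helper name is invented, not from the patch):

	/* Hypothetical helper. Because 'page' is the first member of
	 * struct folio, &folio->page evaluates to NULL when the
	 * allocation failed and folio is NULL, so these call sites
	 * need no explicit NULL check. */
	static inline struct page *hugetlb_folio_as_page(struct folio *folio)
	{
		return &folio->page;
	}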