mm/migrate: Use a folio in alloc_migration_target()
This removes an assumption that a large folio is HPAGE_PMD_ORDER, as well as letting us remove the call to prep_transhuge_page() and a few hidden calls to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
parent 83a8441f8d
commit ffe06786b5

mm/migrate.c: 22 lines changed
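The conversion below leans on a handful of folio helpers. As a quick, hedged reference (prototypes paraphrased from this kernel series for readability; page_folio() is really a macro, and the folio_test_*() helpers are generated by the page-flag macros):

	struct folio *page_folio(struct page *page);	/* containing folio; head page for compound pages */
	int folio_nid(const struct folio *folio);	/* NUMA node the folio sits on */
	bool folio_test_hugetlb(struct folio *folio);	/* hugetlb test, no head-page lookup needed */
	bool folio_test_large(struct folio *folio);	/* true for any multi-page folio */
	unsigned int folio_order(struct folio *folio);	/* the folio's actual order, 0 if not large */
	struct folio *__folio_alloc(gfp_t gfp, unsigned int order,
			int preferred_nid, nodemask_t *nodemask);

Because a folio is never a tail page, the folio predicates test flags directly, whereas several of the page_*() calls being replaced (PageHuge() among them) resolve compound_head() internally on every call; those are the "hidden calls" the commit message refers to. Likewise, __folio_alloc() sets __GFP_COMP and prepares a large folio itself, which is why the explicit prep_transhuge_page() step after allocation can go away.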
@@ -1520,10 +1520,11 @@ out:
 
 struct page *alloc_migration_target(struct page *page, unsigned long private)
 {
+	struct folio *folio = page_folio(page);
 	struct migration_target_control *mtc;
 	gfp_t gfp_mask;
 	unsigned int order = 0;
-	struct page *new_page = NULL;
+	struct folio *new_folio = NULL;
 	int nid;
 	int zidx;
 
@@ -1531,34 +1532,31 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
 	gfp_mask = mtc->gfp_mask;
 	nid = mtc->nid;
 	if (nid == NUMA_NO_NODE)
-		nid = page_to_nid(page);
+		nid = folio_nid(folio);
 
-	if (PageHuge(page)) {
-		struct hstate *h = page_hstate(compound_head(page));
+	if (folio_test_hugetlb(folio)) {
+		struct hstate *h = page_hstate(&folio->page);
 
 		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
 		return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
 	}
 
-	if (PageTransHuge(page)) {
+	if (folio_test_large(folio)) {
 		/*
 		 * clear __GFP_RECLAIM to make the migration callback
 		 * consistent with regular THP allocations.
 		 */
 		gfp_mask &= ~__GFP_RECLAIM;
 		gfp_mask |= GFP_TRANSHUGE;
-		order = HPAGE_PMD_ORDER;
+		order = folio_order(folio);
 	}
-	zidx = zone_idx(page_zone(page));
+	zidx = zone_idx(folio_zone(folio));
 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
 		gfp_mask |= __GFP_HIGHMEM;
 
-	new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);
-
-	if (new_page && PageTransHuge(new_page))
-		prep_transhuge_page(new_page);
+	new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask);
 
-	return new_page;
+	return &new_folio->page;
 }
 
 #ifdef CONFIG_NUMA
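For context, a hedged usage sketch, modeled on the memory-hotplug caller in mm/memory_hotplug.c of the same tree rather than on anything in this commit: alloc_migration_target() is handed to migrate_pages() as the allocation callback, with the migration_target_control threaded through the private argument. The local names mtc, source, and ret are illustrative.

	struct migration_target_control mtc = {
		.nid = NUMA_NO_NODE,	/* let the callback fall back to folio_nid() */
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};
	LIST_HEAD(source);		/* pages already isolated for migration */
	int ret;

	ret = migrate_pages(&source, alloc_migration_target, NULL,
			    (unsigned long)&mtc, MIGRATE_SYNC,
			    MR_MEMORY_HOTPLUG, NULL);

When nid is NUMA_NO_NODE, the hunk above falls back to folio_nid(folio), so the replacement memory is allocated on the same node as the folio being migrated.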