mirror of
https://github.com/torvalds/linux.git
synced 2024-11-22 12:11:40 +00:00
mm/hugetlb: convert putback_active_hugepage to take in a folio
Convert putback_active_hugepage() to folio_putback_active_hugetlb(); this removes one user of the Huge Page macros which take in a page. The callers in migrate.c are also cleaned up by being able to directly use the src and dst folio variables.

Link: https://lkml.kernel.org/r/20230125170537.96973-4-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
91a2fb956a
commit
ea8e72f411
@@ -175,7 +175,7 @@ int isolate_hugetlb(struct folio *folio, struct list_head *list);
|
||||
int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
|
||||
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
|
||||
bool *migratable_cleared);
|
||||
void putback_active_hugepage(struct page *page);
|
||||
void folio_putback_active_hugetlb(struct folio *folio);
|
||||
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
|
||||
void free_huge_page(struct page *page);
|
||||
void hugetlb_fix_reserve_counts(struct inode *inode);
|
||||
@@ -429,7 +429,7 @@ static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void putback_active_hugepage(struct page *page)
|
||||
static inline void folio_putback_active_hugetlb(struct folio *folio)
|
||||
{
|
||||
}
|
||||
|
||||
|
@@ -7300,13 +7300,13 @@ int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
|
||||
return ret;
|
||||
}
|
||||
|
||||
void putback_active_hugepage(struct page *page)
|
||||
void folio_putback_active_hugetlb(struct folio *folio)
|
||||
{
|
||||
spin_lock_irq(&hugetlb_lock);
|
||||
SetHPageMigratable(page);
|
||||
list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
|
||||
folio_set_hugetlb_migratable(folio);
|
||||
list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist);
|
||||
spin_unlock_irq(&hugetlb_lock);
|
||||
put_page(page);
|
||||
folio_put(folio);
|
||||
}
|
||||
|
||||
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
|
||||
|
@@ -151,7 +151,7 @@ void putback_movable_pages(struct list_head *l)
|
||||
|
||||
list_for_each_entry_safe(page, page2, l, lru) {
|
||||
if (unlikely(PageHuge(page))) {
|
||||
putback_active_hugepage(page);
|
||||
folio_putback_active_hugetlb(page_folio(page));
|
||||
continue;
|
||||
}
|
||||
list_del(&page->lru);
|
||||
@@ -1298,7 +1298,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
|
||||
|
||||
if (folio_ref_count(src) == 1) {
|
||||
/* page was freed from under us. So we are done. */
|
||||
putback_active_hugepage(hpage);
|
||||
folio_putback_active_hugetlb(src);
|
||||
return MIGRATEPAGE_SUCCESS;
|
||||
}
|
||||
|
||||
@@ -1383,7 +1383,7 @@ out_unlock:
|
||||
folio_unlock(src);
|
||||
out:
|
||||
if (rc == MIGRATEPAGE_SUCCESS)
|
||||
putback_active_hugepage(hpage);
|
||||
folio_putback_active_hugetlb(src);
|
||||
else if (rc != -EAGAIN)
|
||||
list_move_tail(&src->lru, ret);
|
||||
|
||||
@@ -1395,7 +1395,7 @@ out:
|
||||
if (put_new_page)
|
||||
put_new_page(new_hpage, private);
|
||||
else
|
||||
putback_active_hugepage(new_hpage);
|
||||
folio_putback_active_hugetlb(dst);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user