mm/hugetlb: convert destroy_compound_gigantic_page() to folios
Convert page operations within __destroy_compound_gigantic_page() to the
corresponding folio operations.

Link: https://lkml.kernel.org/r/20221129225039.82257-3-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Tarun Sahu <tsahu@linux.ibm.com>
Cc: Wei Chen <harperchen1110@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
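The change follows one mechanical pattern: the helpers now take a struct folio instead of a raw head page, and each page operation is swapped for its folio counterpart. Below is a minimal sketch of that pattern, kernel-context only and limited to helpers visible in this diff (folio_page(), clear_compound_head(), folio_set_compound_order(), __folio_clear_head()); the function name example_clear_tail_pages() is hypothetical and not part of the patch.

/*
 * Minimal sketch of the page -> folio pattern applied by this patch
 * (kernel context, not a standalone program; example_clear_tail_pages()
 * is a hypothetical name, not a function added here).
 */
#include <linux/mm.h>

static void example_clear_tail_pages(struct folio *folio, unsigned int order)
{
	int nr_pages = 1 << order;
	struct page *p;
	int i;

	/* Walk the tail pages via the folio instead of nth_page(page, i). */
	for (i = 1; i < nr_pages; i++) {
		p = folio_page(folio, i);
		p->mapping = NULL;
		clear_compound_head(p);
	}

	/*
	 * folio_set_compound_order(folio, 0) replaces set_compound_order()
	 * plus the open-coded CONFIG_64BIT compound_nr reset;
	 * __folio_clear_head() replaces __ClearPageHead().
	 */
	folio_set_compound_order(folio, 0);
	__folio_clear_head(folio);
}

Callers obtain the folio once from the head page via page_folio(page), as __update_and_free_page() and demote_free_huge_page() now do before calling the folio variants.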
This commit is contained in:
parent 9fd330582b
commit 911565b828
mm/hugetlb.c | 43
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1325,43 +1325,40 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
 		nr_nodes--)
 
 /* used to demote non-gigantic_huge pages as well */
-static void __destroy_compound_gigantic_page(struct page *page,
+static void __destroy_compound_gigantic_folio(struct folio *folio,
 					unsigned int order, bool demote)
 {
 	int i;
 	int nr_pages = 1 << order;
 	struct page *p;
 
-	atomic_set(compound_mapcount_ptr(page), 0);
-	atomic_set(subpages_mapcount_ptr(page), 0);
-	atomic_set(compound_pincount_ptr(page), 0);
+	atomic_set(folio_mapcount_ptr(folio), 0);
+	atomic_set(folio_subpages_mapcount_ptr(folio), 0);
+	atomic_set(folio_pincount_ptr(folio), 0);
 
 	for (i = 1; i < nr_pages; i++) {
-		p = nth_page(page, i);
+		p = folio_page(folio, i);
 		p->mapping = NULL;
 		clear_compound_head(p);
 		if (!demote)
 			set_page_refcounted(p);
 	}
 
-	set_compound_order(page, 0);
-#ifdef CONFIG_64BIT
-	page[1].compound_nr = 0;
-#endif
-	__ClearPageHead(page);
+	folio_set_compound_order(folio, 0);
+	__folio_clear_head(folio);
 }
 
-static void destroy_compound_hugetlb_page_for_demote(struct page *page,
+static void destroy_compound_hugetlb_folio_for_demote(struct folio *folio,
 					unsigned int order)
 {
-	__destroy_compound_gigantic_page(page, order, true);
+	__destroy_compound_gigantic_folio(folio, order, true);
 }
 
 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-static void destroy_compound_gigantic_page(struct page *page,
+static void destroy_compound_gigantic_folio(struct folio *folio,
 					unsigned int order)
 {
-	__destroy_compound_gigantic_page(page, order, false);
+	__destroy_compound_gigantic_folio(folio, order, false);
 }
 
 static void free_gigantic_page(struct page *page, unsigned int order)
@@ -1430,7 +1427,7 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 	return NULL;
 }
 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
-static inline void destroy_compound_gigantic_page(struct page *page,
+static inline void destroy_compound_gigantic_folio(struct folio *folio,
 						unsigned int order) { }
 #endif
 
@@ -1477,8 +1474,8 @@ static void __remove_hugetlb_page(struct hstate *h, struct page *page,
 	 *
 	 * For gigantic pages set the destructor to the null dtor. This
 	 * destructor will never be called. Before freeing the gigantic
-	 * page destroy_compound_gigantic_page will turn the compound page
-	 * into a simple group of pages. After this the destructor does not
+	 * page destroy_compound_gigantic_folio will turn the folio into a
+	 * simple group of pages. After this the destructor does not
 	 * apply.
 	 *
 	 * This handles the case where more than one ref is held when and
@@ -1559,6 +1556,7 @@ static void add_hugetlb_page(struct hstate *h, struct page *page,
 static void __update_and_free_page(struct hstate *h, struct page *page)
 {
 	int i;
+	struct folio *folio = page_folio(page);
 	struct page *subpage;
 
 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
@@ -1587,8 +1585,8 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
 	 * Move PageHWPoison flag from head page to the raw error pages,
 	 * which makes any healthy subpages reusable.
 	 */
-	if (unlikely(PageHWPoison(page)))
-		hugetlb_clear_page_hwpoison(page);
+	if (unlikely(folio_test_hwpoison(folio)))
+		hugetlb_clear_page_hwpoison(&folio->page);
 
 	for (i = 0; i < pages_per_huge_page(h); i++) {
 		subpage = nth_page(page, i);
@@ -1604,7 +1602,7 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
 	 */
 	if (hstate_is_gigantic(h) ||
 	    hugetlb_cma_page(page, huge_page_order(h))) {
-		destroy_compound_gigantic_page(page, huge_page_order(h));
+		destroy_compound_gigantic_folio(folio, huge_page_order(h));
 		free_gigantic_page(page, huge_page_order(h));
 	} else {
 		__free_pages(page, huge_page_order(h));
@@ -3437,6 +3435,7 @@ static int demote_free_huge_page(struct hstate *h, struct page *page)
 {
 	int i, nid = page_to_nid(page);
 	struct hstate *target_hstate;
+	struct folio *folio = page_folio(page);
 	struct page *subpage;
 	int rc = 0;
 
@@ -3455,10 +3454,10 @@ static int demote_free_huge_page(struct hstate *h, struct page *page)
 	}
 
 	/*
-	 * Use destroy_compound_hugetlb_page_for_demote for all huge page
+	 * Use destroy_compound_hugetlb_folio_for_demote for all huge page
 	 * sizes as it will not ref count pages.
 	 */
-	destroy_compound_hugetlb_page_for_demote(page, huge_page_order(h));
+	destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h));
 
 	/*
 	 * Taking target hstate mutex synchronizes with set_max_huge_pages.