mm: hugetlb_vmemmap: convert page to folio

There are still some places that have not been converted to folios; this
patch converts all of them.  It also does some trivial cleanup to fix
code style problems.
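
As a condensed illustration of the conversion pattern (lifted from the
vmemmap_should_optimize() hunk below; the elided checks are unchanged),
the page-based helper takes the head page and tests it with the page
flag helpers, while the folio-based replacement takes the folio directly
and uses the folio_test_* helpers:

/* Before: the helper operates on the head page of the HugeTLB page. */
static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
{
        if (HPageVmemmapOptimized((struct page *)head))
                return false;
        /* ... remaining checks unchanged ... */
}

/* After: the helper takes the folio and uses the folio test helpers. */
static bool vmemmap_should_optimize_folio(const struct hstate *h, struct folio *folio)
{
        if (folio_test_hugetlb_vmemmap_optimized(folio))
                return false;
        /* ... remaining checks unchanged ... */
}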

Link: https://lkml.kernel.org/r/20231127084645.27017-5-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -280,7 +280,7 @@ static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
  * Return: %0 on success, negative error code otherwise.
  */
 static int vmemmap_remap_split(unsigned long start, unsigned long end,
-                                unsigned long reuse)
+                               unsigned long reuse)
 {
         int ret;
         struct vmemmap_remap_walk walk = {
@@ -447,14 +447,14 @@ EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
 static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
 core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0);
 
-static int __hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio, unsigned long flags)
+static int __hugetlb_vmemmap_restore_folio(const struct hstate *h,
+                                           struct folio *folio, unsigned long flags)
 {
         int ret;
-        struct page *head = &folio->page;
-        unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
+        unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
         unsigned long vmemmap_reuse;
 
-        VM_WARN_ON_ONCE(!PageHuge(head));
+        VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(folio), folio);
         if (!folio_test_hugetlb_vmemmap_optimized(folio))
                 return 0;
@@ -517,7 +517,7 @@ long hugetlb_vmemmap_restore_folios(const struct hstate *h,
         list_for_each_entry_safe(folio, t_folio, folio_list, lru) {
                 if (folio_test_hugetlb_vmemmap_optimized(folio)) {
                         ret = __hugetlb_vmemmap_restore_folio(h, folio,
-                                                VMEMMAP_REMAP_NO_TLB_FLUSH);
+                                                              VMEMMAP_REMAP_NO_TLB_FLUSH);
                         if (ret)
                                 break;
                         restored++;
@@ -535,9 +535,9 @@ long hugetlb_vmemmap_restore_folios(const struct hstate *h,
 }
 
 /* Return true iff a HugeTLB whose vmemmap should and can be optimized. */
-static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
+static bool vmemmap_should_optimize_folio(const struct hstate *h, struct folio *folio)
 {
-        if (HPageVmemmapOptimized((struct page *)head))
+        if (folio_test_hugetlb_vmemmap_optimized(folio))
                 return false;
 
         if (!READ_ONCE(vmemmap_optimize_enabled))
@@ -550,17 +550,16 @@ static bool vmemmap_should_optimize(const struct hstate *h, const struct page *h
 }
 
 static int __hugetlb_vmemmap_optimize_folio(const struct hstate *h,
-                                        struct folio *folio,
-                                        struct list_head *vmemmap_pages,
-                                        unsigned long flags)
+                                            struct folio *folio,
+                                            struct list_head *vmemmap_pages,
+                                            unsigned long flags)
 {
         int ret = 0;
-        struct page *head = &folio->page;
-        unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
+        unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
         unsigned long vmemmap_reuse;
 
-        VM_WARN_ON_ONCE(!PageHuge(head));
-        if (!vmemmap_should_optimize(h, head))
+        VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(folio), folio);
+        if (!vmemmap_should_optimize_folio(h, folio))
                 return ret;
 
         static_branch_inc(&hugetlb_optimize_vmemmap_key);
@@ -588,7 +587,7 @@ static int __hugetlb_vmemmap_optimize_folio(const struct hstate *h,
          * the caller.
          */
         ret = vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse,
-                                                vmemmap_pages, flags);
+                                 vmemmap_pages, flags);
         if (ret) {
                 static_branch_dec(&hugetlb_optimize_vmemmap_key);
                 folio_clear_hugetlb_vmemmap_optimized(folio);
@@ -615,12 +614,12 @@ void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio)
         free_vmemmap_page_list(&vmemmap_pages);
 }
 
-static int hugetlb_vmemmap_split(const struct hstate *h, struct page *head)
+static int hugetlb_vmemmap_split_folio(const struct hstate *h, struct folio *folio)
 {
-        unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
+        unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
         unsigned long vmemmap_reuse;
 
-        if (!vmemmap_should_optimize(h, head))
+        if (!vmemmap_should_optimize_folio(h, folio))
                 return 0;
 
         vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);
@@ -640,7 +639,7 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
         LIST_HEAD(vmemmap_pages);
 
         list_for_each_entry(folio, folio_list, lru) {
-                int ret = hugetlb_vmemmap_split(h, &folio->page);
+                int ret = hugetlb_vmemmap_split_folio(h, folio);
 
                 /*
                  * Spliting the PMD requires allocating a page, thus lets fail
@@ -655,9 +654,10 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
         flush_tlb_all();
 
         list_for_each_entry(folio, folio_list, lru) {
-                int ret = __hugetlb_vmemmap_optimize_folio(h, folio,
-                                                           &vmemmap_pages,
-                                                           VMEMMAP_REMAP_NO_TLB_FLUSH);
+                int ret;
+
+                ret = __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages,
+                                                       VMEMMAP_REMAP_NO_TLB_FLUSH);
 
                 /*
                  * Pages to be freed may have been accumulated. If we
@@ -671,9 +671,8 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
                         flush_tlb_all();
                         free_vmemmap_page_list(&vmemmap_pages);
                         INIT_LIST_HEAD(&vmemmap_pages);
-                        __hugetlb_vmemmap_optimize_folio(h, folio,
-                                                         &vmemmap_pages,
-                                                         VMEMMAP_REMAP_NO_TLB_FLUSH);
+                        __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages,
+                                                         VMEMMAP_REMAP_NO_TLB_FLUSH);
                 }
         }