arm64/hugetlb: Reserve CMA areas for gigantic pages on 16K and 64K configs

Currently 'hugetlb_cma=' command line argument does not create CMA area on
ARM64_16K_PAGES and ARM64_64K_PAGES based platforms. Instead, it just ends
up with the following warning message. Reason being, hugetlb_cma_reserve()
never gets called for these huge page sizes.

[   64.255669] hugetlb_cma: the option isn't supported by current arch

This enables CMA areas reservation on ARM64_16K_PAGES and ARM64_64K_PAGES
configs by defining a unified arm64_hugetlb_cma_reserve() that is wrapped
in CONFIG_CMA. Call site for arm64_hugetlb_cma_reserve() is also protected
as <asm/hugetlb.h> is conditionally included and hence cannot contain stub
for the inverse config i.e !(CONFIG_HUGETLB_PAGE && CONFIG_CMA).

Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Barry Song <song.bao.hua@hisilicon.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Link: https://lore.kernel.org/r/1593578521-24672-1-git-send-email-anshuman.khandual@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
This commit is contained in:
Anshuman Khandual 2020-07-01 10:12:01 +05:30 committed by Catalin Marinas
parent 0de674afe8
commit abb7962adc
3 changed files with 42 additions and 2 deletions

View File

@@ -49,6 +49,8 @@ extern void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
 				 pte_t *ptep, pte_t pte, unsigned long sz);
 #define set_huge_swap_pte_at set_huge_swap_pte_at
+
+void __init arm64_hugetlb_cma_reserve(void);
 #include <asm-generic/hugetlb.h>
 #endif /* __ASM_HUGETLB_H */

View File

@@ -19,6 +19,44 @@
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
/*
* HugeTLB Support Matrix
*
* ---------------------------------------------------
* | Page Size | CONT PTE | PMD | CONT PMD | PUD |
* ---------------------------------------------------
* | 4K | 64K | 2M | 32M | 1G |
* | 16K | 2M | 32M | 1G | |
* | 64K | 2M | 512M | 16G | |
* ---------------------------------------------------
*/
/*
 * Carve out a CMA area sized for the largest gigantic huge page the
 * current page-size configuration supports; requests for any smaller
 * gigantic huge page size can still be satisfied from the same area.
 */
#ifdef CONFIG_CMA
void __init arm64_hugetlb_cma_reserve(void)
{
#ifdef CONFIG_ARM64_4K_PAGES
	/* 4K pages: largest gigantic size is PUD-mapped (1G) */
	const int order = PUD_SHIFT - PAGE_SHIFT;
#else
	/* 16K/64K pages: largest gigantic size is CONT PMD-mapped */
	const int order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
#endif

	/*
	 * CMA backing only makes sense for gigantic huge pages, i.e.
	 * orders the page allocator cannot serve directly. Warn if a
	 * future change ever breaks that assumption.
	 */
	WARN_ON(order <= MAX_ORDER);
	hugetlb_cma_reserve(order);
}
#endif /* CONFIG_CMA */
 #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
 bool arch_hugetlb_migration_supported(struct hstate *h)
 {

View File

@@ -425,8 +425,8 @@ void __init bootmem_init(void)
	 * initialize node_online_map that gets used in hugetlb_cma_reserve()
	 * while allocating required CMA size across online nodes.
	 */
-#ifdef CONFIG_ARM64_4K_PAGES
-	hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
+	arm64_hugetlb_cma_reserve();
 #endif
 	/*