Merge branch 'for-next/mm' into for-next/core

* for-next/mm:
  arm64: mm: update max_pfn after memory hotplug
  arm64/mm: Add pud_sect_supported()
  arm64: mm: Drop pointless call to set_max_mapnr()

commit dc6bab18fb
Author: Will Deacon
Date:   2021-10-29 12:25:04 +01:00

5 changed files with 27 additions and 15 deletions

--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h

@@ -1022,6 +1022,11 @@ static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
 	return PAGE_READONLY_EXEC;
 }
 
+static inline bool pud_sect_supported(void)
+{
+	return PAGE_SIZE == SZ_4K;
+}
+
 #endif /* !__ASSEMBLY__ */

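Note: pud_sect_supported() turns the old scattered #ifdef CONFIG_ARM64_4K_PAGES
checks into one always-defined helper that the compiler can still fold to a
constant. A minimal standalone sketch of that pattern (plain userspace C, not
kernel code; PAGE_SIZE is an assumed 4K-granule value):

/*
 * Sketch only: an always-defined inline helper replacing an #ifdef.
 * PAGE_SIZE is assumed here; the kernel derives it from the page-size
 * Kconfig selection.
 */
#include <stdbool.h>
#include <stdio.h>

#define SZ_4K		0x1000
#define PAGE_SIZE	SZ_4K	/* assumption: 4K-granule build */

static inline bool pud_sect_supported(void)
{
	return PAGE_SIZE == SZ_4K;
}

int main(void)
{
	if (pud_sect_supported())
		puts("PUD-level section (block) mappings supported");
	else
		puts("no PUD-level section mappings on this granule");
	return 0;
}

Because PAGE_SIZE is a compile-time constant, the dead branch folds away just
as with #ifdef, but both arms are still parsed and type-checked.
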
--- a/arch/arm64/include/asm/vmalloc.h
+++ b/arch/arm64/include/asm/vmalloc.h

@@ -2,6 +2,7 @@
 #define _ASM_ARM64_VMALLOC_H
 
 #include <asm/page.h>
+#include <asm/pgtable.h>
 
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
@@ -9,10 +10,9 @@
 
 static inline bool arch_vmap_pud_supported(pgprot_t prot)
 {
 	/*
-	 * Only 4k granule supports level 1 block mappings.
 	 * SW table walks can't handle removal of intermediate entries.
 	 */
-	return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
+	return pud_sect_supported() &&
 	       !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
 }

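Note: the ptdump check above still goes through IS_ENABLED(). A simplified
standalone sketch of that macro, adapted from the kernel's
include/linux/kconfig.h (built-in "=y" case only; the "=m" handling of the
real macro is omitted):

/*
 * Sketch only: IS_ENABLED() maps "#define CONFIG_FOO 1" to the C
 * expression 1, and an undefined CONFIG_FOO to 0, via token pasting.
 */
#include <stdio.h>

#define CONFIG_PTDUMP_DEBUGFS 1	/* pretend the option is =y */

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) __is_defined(option)

int main(void)
{
	/* prints 1; with the #define removed it would print 0 */
	printf("CONFIG_PTDUMP_DEBUGFS: %d\n",
	       IS_ENABLED(CONFIG_PTDUMP_DEBUGFS));
	return 0;
}

Unlike #ifdef, both operands stay visible to the compiler, which is what lets
pud_sect_supported() and !IS_ENABLED(...) compose in one return expression.
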
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c

@@ -40,11 +40,10 @@ void __init arm64_hugetlb_cma_reserve(void)
 {
 	int order;
 
-#ifdef CONFIG_ARM64_4K_PAGES
-	order = PUD_SHIFT - PAGE_SHIFT;
-#else
-	order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
-#endif
+	if (pud_sect_supported())
+		order = PUD_SHIFT - PAGE_SHIFT;
+	else
+		order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
+
 	/*
 	 * HugeTLB CMA reservation is required for gigantic
 	 * huge pages which could not be allocated via the
@@ -62,8 +61,9 @@ bool arch_hugetlb_migration_supported(struct hstate *h)
 	size_t pagesize = huge_page_size(h);
 
 	switch (pagesize) {
-#ifdef CONFIG_ARM64_4K_PAGES
+#ifndef __PAGETABLE_PMD_FOLDED
 	case PUD_SIZE:
+		return pud_sect_supported();
 #endif
 	case PMD_SIZE:
 	case CONT_PMD_SIZE:
@@ -126,8 +126,11 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
 	*pgsize = size;
 
 	switch (size) {
-#ifdef CONFIG_ARM64_4K_PAGES
+#ifndef __PAGETABLE_PMD_FOLDED
 	case PUD_SIZE:
+		if (pud_sect_supported())
+			contig_ptes = 1;
+		break;
 #endif
 	case PMD_SIZE:
 		contig_ptes = 1;
@@ -489,9 +492,9 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma,
 
 static int __init hugetlbpage_init(void)
 {
-#ifdef CONFIG_ARM64_4K_PAGES
-	hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
-#endif
+	if (pud_sect_supported())
+		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+
 	hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
 	hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
 	hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);
@@ -503,8 +506,9 @@ arch_initcall(hugetlbpage_init);
 bool __init arch_hugetlb_valid_size(unsigned long size)
 {
 	switch (size) {
-#ifdef CONFIG_ARM64_4K_PAGES
+#ifndef __PAGETABLE_PMD_FOLDED
 	case PUD_SIZE:
+		return pud_sect_supported();
 #endif
 	case CONT_PMD_SIZE:
 	case PMD_SIZE:

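Note: with the 4K granule the order above is PUD_SHIFT - PAGE_SHIFT =
30 - 12 = 18, i.e. 2^18 base pages per 1GiB gigantic page. A standalone
sketch of that arithmetic (shift values assumed for a 4K-granule build;
not kernel code):

/*
 * Sketch only: the allocation order of a PUD-sized gigantic page.
 */
#include <stdio.h>

#define PAGE_SHIFT	12	/* assumed: 4KiB base pages */
#define PUD_SHIFT	30	/* assumed: a PUD entry maps 1GiB */

int main(void)
{
	int order = PUD_SHIFT - PAGE_SHIFT;	/* 30 - 12 = 18 */

	/* 2^order pages of 2^PAGE_SHIFT bytes: 2^18 * 4KiB = 1GiB */
	printf("order %d => %lu MiB per gigantic page\n",
	       order, ((1UL << order) << PAGE_SHIFT) >> 20);
	return 0;
}
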
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c

@@ -416,8 +416,6 @@ void __init mem_init(void)
 	else if (!xen_swiotlb_detect())
 		swiotlb_force = SWIOTLB_NO_FORCE;
 
-	set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);
-
 	/* this will put all unused low memory onto the freelists */
 	memblock_free_all();
 

--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c

@@ -1499,6 +1499,11 @@ int arch_add_memory(int nid, u64 start, u64 size,
 	if (ret)
 		__remove_pgd_mapping(swapper_pg_dir,
 				     __phys_to_virt(start), size);
+	else {
+		max_pfn = PFN_UP(start + size);
+		max_low_pfn = max_pfn;
+	}
+
 	return ret;
 }
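
Note: PFN_UP() rounds an address up to the next page frame, so the hotplug
path above sets max_pfn to one past the last frame of the new block. A
standalone sketch with an assumed 4K page size and made-up addresses:

/*
 * Sketch only: PFN_UP() as defined in include/linux/pfn.h, applied to
 * a hypothetical hotplugged range.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
	unsigned long start = 0x40000000UL;	/* hypothetical hotplug base */
	unsigned long size  = 0x10000000UL;	/* hypothetical 256MiB block */

	/* first PFN past the new block becomes the new max_pfn */
	printf("max_pfn = 0x%lx\n", PFN_UP(start + size));
	return 0;
}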