mm: sparsemem: use huge PMD mapping for vmemmap pages

The preparation for splitting the huge PMD mapping of vmemmap pages is
ready, so switch the mapping from PTE to PMD.

Link: https://lkml.kernel.org/r/20210616094915.34432-3-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Chen Huang <chenhuang5@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Xiongchun Duan <duanxiongchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Muchun Song, 2021-06-30 18:48:25 -07:00; committed by Linus Torvalds
parent 3bc2b6a725
commit 2d7a21715f
4 changed files with 9 additions and 33 deletions

Documentation/admin-guide/kernel-parameters.txt

@@ -1572,13 +1572,6 @@
 enabled.
 Allows heavy hugetlb users to free up some more
 memory (6 * PAGE_SIZE for each 2MB hugetlb page).
-This feauture is not free though. Large page
-tables are not used to back vmemmap pages which
-can lead to a performance degradation for some
-workloads. Also there will be memory allocation
-required when hugetlb pages are freed from the
-pool which can lead to corner cases under heavy
-memory pressure.
 Format: { on | off (default) }
 
 on: enable the feature
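
As a quick sanity check on the "6 * PAGE_SIZE for each 2MB hugetlb page" figure kept above, the standalone C sketch below redoes the arithmetic. It assumes 4 KiB base pages, a 64-byte struct page, and that this version of the optimization keeps 2 vmemmap pages per 2MB hugetlb page; none of those constants come from the hunk itself.

#include <stdio.h>

int main(void)
{
	const unsigned long page_size   = 4096;       /* assumed base page size       */
	const unsigned long struct_page = 64;         /* assumed sizeof(struct page)  */
	const unsigned long hugepage    = 2UL << 20;  /* 2 MB hugetlb page            */
	const unsigned long kept        = 2;          /* vmemmap pages kept (assumed) */

	unsigned long base_pages    = hugepage / page_size;       /* 512   */
	unsigned long vmemmap_bytes = base_pages * struct_page;   /* 32 KB */
	unsigned long vmemmap_pages = vmemmap_bytes / page_size;  /* 8     */

	/* 8 - 2 = 6 pages, i.e. the "6 * PAGE_SIZE" quoted in the documentation. */
	printf("vmemmap pages per 2MB hugetlb page: %lu, freed: %lu\n",
	       vmemmap_pages, vmemmap_pages - kept);
	return 0;
}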

arch/x86/mm/init_64.c

@@ -34,7 +34,6 @@
 #include <linux/gfp.h>
 #include <linux/kcore.h>
 #include <linux/bootmem_info.h>
-#include <linux/hugetlb.h>
 
 #include <asm/processor.h>
 #include <asm/bios_ebda.h>
@@ -1610,8 +1609,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
 VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));
 
-if ((is_hugetlb_free_vmemmap_enabled() && !altmap) ||
-    end - start < PAGES_PER_SECTION * sizeof(struct page))
+if (end - start < PAGES_PER_SECTION * sizeof(struct page))
 err = vmemmap_populate_basepages(start, end, node, NULL);
 else if (boot_cpu_has(X86_FEATURE_PSE))
 err = vmemmap_populate_hugepages(start, end, node, altmap);
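
The net effect of this hunk is that the hugetlb_free_vmemmap knob no longer forces base-page (PTE) population of the vmemmap at boot. The standalone sketch below models the resulting decision; it is not the kernel function, and PAGES_PER_SECTION, the struct page size, and the helper name are assumptions (with these values one section's vmemmap is 32768 * 64 bytes = 2 MB, i.e. exactly one PMD).

#include <stdbool.h>
#include <stdio.h>

#define STRUCT_PAGE_SIZE  64UL      /* assumed sizeof(struct page)          */
#define PAGES_PER_SECTION 32768UL   /* assumed: 128 MB section / 4 KB pages */

/* Which granularity backs the vmemmap for [start, end) after this patch. */
static const char *vmemmap_granularity(unsigned long start, unsigned long end,
				       bool cpu_has_pse)
{
	/*
	 * The removed is_hugetlb_free_vmemmap_enabled() check used to force
	 * the PTE branch whenever the feature was on and no altmap was in
	 * use; now the PMD is installed at boot and only split later, on
	 * demand, when a hugetlb page actually frees its vmemmap.
	 */
	if (end - start < PAGES_PER_SECTION * STRUCT_PAGE_SIZE)
		return "PTE (less than one section's worth of struct pages)";
	if (cpu_has_pse)
		return "PMD (2 MB huge mapping)";
	return "PTE (CPU lacks PSE)";
}

int main(void)
{
	unsigned long one_section = PAGES_PER_SECTION * STRUCT_PAGE_SIZE;

	printf("%s\n", vmemmap_granularity(0, one_section, true));
	printf("%s\n", vmemmap_granularity(0, one_section / 2, true));
	printf("%s\n", vmemmap_granularity(0, one_section, false));
	return 0;
}
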
@@ -1639,8 +1637,6 @@ void register_page_bootmem_memmap(unsigned long section_nr,
 pmd_t *pmd;
 unsigned int nr_pmd_pages;
 struct page *page;
-bool base_mapping = !boot_cpu_has(X86_FEATURE_PSE) ||
-is_hugetlb_free_vmemmap_enabled();
 
 for (; addr < end; addr = next) {
 pte_t *pte = NULL;
@@ -1666,7 +1662,7 @@ void register_page_bootmem_memmap(unsigned long section_nr,
 }
 get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);
 
-if (base_mapping) {
+if (!boot_cpu_has(X86_FEATURE_PSE)) {
 next = (addr + PAGE_SIZE) & PAGE_MASK;
 pmd = pmd_offset(pud, addr);
 if (pmd_none(*pmd))
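
Same idea in register_page_bootmem_memmap(): with base_mapping gone, the walk granularity depends only on whether the CPU has PSE. A minimal model of the step computation follows; the constants are assumptions and the clamp to end stands in for pmd_addr_end().

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL          /* assumed base page size */
#define PMD_SIZE  (2UL << 20)     /* assumed PMD coverage   */
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PMD_MASK  (~(PMD_SIZE - 1))

/* Next vmemmap address to visit when registering a section's memmap pages. */
static unsigned long walk_step(unsigned long addr, unsigned long end,
			       bool cpu_has_pse)
{
	unsigned long next;

	if (!cpu_has_pse)                        /* vmemmap is PTE-mapped */
		next = (addr + PAGE_SIZE) & PAGE_MASK;
	else                                     /* vmemmap is PMD-mapped */
		next = (addr + PMD_SIZE) & PMD_MASK;

	return next < end ? next : end;          /* clamp, like pmd_addr_end() */
}

int main(void)
{
	unsigned long start = 0xffffea0000000000UL, end = start + (4UL << 20);

	printf("PMD step: %#lx\n", walk_step(start, end, true));
	printf("PTE step: %#lx\n", walk_step(start, end, false));
	return 0;
}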

include/linux/hugetlb.h

@@ -895,20 +895,6 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
 }
 #endif
 
-#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
-extern bool hugetlb_free_vmemmap_enabled;
-
-static inline bool is_hugetlb_free_vmemmap_enabled(void)
-{
-return hugetlb_free_vmemmap_enabled;
-}
-#else
-static inline bool is_hugetlb_free_vmemmap_enabled(void)
-{
-return false;
-}
-#endif
-
 #else /* CONFIG_HUGETLB_PAGE */
 struct hstate {};
@@ -1063,13 +1049,14 @@ static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
 pte_t *ptep, pte_t pte, unsigned long sz)
 {
 }
 
-static inline bool is_hugetlb_free_vmemmap_enabled(void)
-{
-return false;
-}
 #endif /* CONFIG_HUGETLB_PAGE */
 
+#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+extern bool hugetlb_free_vmemmap_enabled;
+#else
+#define hugetlb_free_vmemmap_enabled false
+#endif
+
 static inline spinlock_t *huge_pte_lock(struct hstate *h,
 struct mm_struct *mm, pte_t *pte)
 {
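
Taken together, the two hugetlb.h hunks replace the is_hugetlb_free_vmemmap_enabled() wrappers with the flag itself: an extern bool when CONFIG_HUGETLB_PAGE_FREE_VMEMMAP is set, a plain false macro otherwise, so callers read it directly and the compiler can drop the dead branch. Below is a self-contained sketch of that pattern, with a hypothetical caller standing in for mhp_supports_memmap_on_memory().

#include <stdbool.h>
#include <stdio.h>

/* Leave CONFIG_HUGETLB_PAGE_FREE_VMEMMAP undefined here to model a kernel
 * built without the feature; in a real build with it enabled, the kernel
 * defines the bool and sets it from the hugetlb_free_vmemmap= parameter. */

#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
extern bool hugetlb_free_vmemmap_enabled;
#else
#define hugetlb_free_vmemmap_enabled false
#endif

/* Hypothetical caller, standing in for mhp_supports_memmap_on_memory(). */
static bool memmap_on_memory_possible(void)
{
	/* With the config off this folds to a compile-time constant, which
	 * is why the inline wrapper function is no longer needed. */
	return !hugetlb_free_vmemmap_enabled;
}

int main(void)
{
	printf("memmap_on_memory possible: %s\n",
	       memmap_on_memory_possible() ? "yes" : "no");
	return 0;
}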

mm/memory_hotplug.c

@@ -1056,7 +1056,7 @@ bool mhp_supports_memmap_on_memory(unsigned long size)
 * populate a single PMD.
 */
 return memmap_on_memory &&
-!is_hugetlb_free_vmemmap_enabled() &&
+!hugetlb_free_vmemmap_enabled &&
 IS_ENABLED(CONFIG_MHP_MEMMAP_ON_MEMORY) &&
 size == memory_block_size_bytes() &&
 IS_ALIGNED(vmemmap_size, PMD_SIZE) &&