mirror of
https://github.com/torvalds/linux.git
synced 2024-11-28 07:01:32 +00:00
0dd4f60a2c
In order to support LPA2 on 16k pages in a way that permits non-LPA2 systems to run the same kernel image, we have to be able to fall back to at most 48 bits of virtual addressing. Falling back to 48 bits would result in a level 0 with only 2 entries, which is suboptimal in terms of TLB utilization. So instead, let's fall back to 47 bits in that case. This means we need to be able to fold PUDs dynamically, similar to how we fold P4Ds for 48 bit virtual addressing on LPA2 with 4k pages. Signed-off-by: Ard Biesheuvel <ardb@kernel.org> Link: https://lore.kernel.org/r/20240214122845.2033971-81-ardb+git@google.com Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
68 lines
1.4 KiB
C
68 lines
1.4 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
|
|
/*
|
|
* PGD allocation/freeing
|
|
*
|
|
* Copyright (C) 2012 ARM Ltd.
|
|
* Author: Catalin Marinas <catalin.marinas@arm.com>
|
|
*/
|
|
|
|
#include <linux/mm.h>
|
|
#include <linux/gfp.h>
|
|
#include <linux/highmem.h>
|
|
#include <linux/slab.h>
|
|
|
|
#include <asm/pgalloc.h>
|
|
#include <asm/page.h>
|
|
#include <asm/tlbflush.h>
|
|
|
|
static struct kmem_cache *pgd_cache __ro_after_init;
|
|
|
|
static bool pgdir_is_page_size(void)
|
|
{
|
|
if (PGD_SIZE == PAGE_SIZE)
|
|
return true;
|
|
if (CONFIG_PGTABLE_LEVELS == 4)
|
|
return !pgtable_l4_enabled();
|
|
if (CONFIG_PGTABLE_LEVELS == 5)
|
|
return !pgtable_l5_enabled();
|
|
return false;
|
|
}
|
|
|
|
pgd_t *pgd_alloc(struct mm_struct *mm)
|
|
{
|
|
gfp_t gfp = GFP_PGTABLE_USER;
|
|
|
|
if (pgdir_is_page_size())
|
|
return (pgd_t *)__get_free_page(gfp);
|
|
else
|
|
return kmem_cache_alloc(pgd_cache, gfp);
|
|
}
|
|
|
|
/*
 * Release a pgd previously obtained from pgd_alloc(), returning it to
 * whichever allocator it came from (page allocator or pgd_cache).
 */
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (!pgdir_is_page_size()) {
		kmem_cache_free(pgd_cache, pgd);
		return;
	}

	free_page((unsigned long)pgd);
}
|
|
|
|
/*
 * Set up the slab cache backing sub-page-sized pgd allocations.
 *
 * Nothing to do when the pgd occupies a whole page: pgd_alloc() then
 * goes straight to the page allocator.  Otherwise create pgd_cache,
 * sized and aligned to PGD_SIZE, so every pgd comes out naturally
 * aligned as the architecture requires.
 */
void __init pgtable_cache_init(void)
{
	if (pgdir_is_page_size())
		return;

#ifdef CONFIG_ARM64_PA_BITS_52
	/*
	 * With 52-bit physical addresses, the architecture requires the
	 * top-level table to be aligned to at least 64 bytes.
	 */
	BUILD_BUG_ON(PGD_SIZE < 64);
#endif

	/* SLAB_PANIC: a boot-time failure here is unrecoverable. */
	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
				      SLAB_PANIC, NULL);
}
|