mirror of https://github.com/torvalds/linux.git (synced 2024-12-13 22:53:20 +00:00)
arm64: mm: Introduce vabits_actual
In order to support 52-bit kernel addresses detectable at boot time, one
needs to know the actual VA_BITS detected. A new variable, vabits_actual,
is introduced in this commit and employed for the KVM hypervisor layout,
KASAN, fault handling and phys-to/from-virt translation, where there would
normally be compile-time constants.

In order to maintain performance in phys_to_virt, another variable,
physvirt_offset, is introduced.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Steve Capper <steve.capper@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
This commit is contained in:
parent 90ec95cda9
commit 5383cc6efe
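The core idea is easiest to see outside the kernel. The stand-alone C sketch below uses illustrative values only (48 VA bits, DRAM at 0x80000000; in the kernel the real computation happens in head.S and arm64_memblock_init()): once vabits_actual is known, PAGE_OFFSET is computable at boot, and phys_to_virt() collapses to a single subtraction of a precomputed physvirt_offset instead of arithmetic on compile-time constants.

#include <stdint.h>
#include <stdio.h>

/* Detected at early boot by the kernel; 48 is an illustrative value. */
static uint64_t vabits_actual = 48;

/* PHYS_OFFSET - PAGE_OFFSET, computed once at boot. */
static int64_t physvirt_offset;

/* PAGE_OFFSET = 0xffffffffffffffff - (1 << va_bits) + 1 */
static uint64_t page_offset(uint64_t va_bits)
{
	return UINT64_MAX - (1ULL << va_bits) + 1;
}

/* Mirrors the new __phys_to_virt(): one subtraction, no constants. */
static uint64_t phys_to_virt(uint64_t pa)
{
	return (uint64_t)((int64_t)pa - physvirt_offset);
}

int main(void)
{
	uint64_t phys_offset = 0x80000000;	/* illustrative DRAM base */

	physvirt_offset = (int64_t)(phys_offset - page_offset(vabits_actual));
	printf("PAGE_OFFSET      = 0x%016llx\n",
	       (unsigned long long)page_offset(vabits_actual));
	printf("virt(0x80001000) = 0x%016llx\n",
	       (unsigned long long)phys_to_virt(0x80001000));
	return 0;
}

With these assumed inputs the sketch prints PAGE_OFFSET = 0xffff000000000000 and maps the physical page at 0x80001000 to the linear-map address 0xffff000000001000.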
arch/arm64/include/asm/kasan.h
@@ -31,7 +31,7 @@
  * (1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT))
  */
 #define _KASAN_SHADOW_START(va)	(KASAN_SHADOW_END - (1UL << ((va) - KASAN_SHADOW_SCALE_SHIFT)))
-#define KASAN_SHADOW_START	_KASAN_SHADOW_START(VA_BITS)
+#define KASAN_SHADOW_START	_KASAN_SHADOW_START(vabits_actual)
 
 void kasan_init(void);
 void kasan_copy_shadow(pgd_t *pgdir);
arch/arm64/include/asm/memory.h
@@ -37,8 +37,6 @@
  * VA_START - the first kernel virtual address.
  */
 #define VA_BITS			(CONFIG_ARM64_VA_BITS)
-#define VA_START		(UL(0xffffffffffffffff) - \
-	(UL(1) << (VA_BITS - 1)) + 1)
 #define PAGE_OFFSET		(UL(0xffffffffffffffff) - \
 	(UL(1) << VA_BITS) + 1)
 #define KIMAGE_VADDR		(MODULES_END)
@@ -166,10 +164,13 @@
 #endif
 
 #ifndef __ASSEMBLY__
+extern u64			vabits_actual;
+#define VA_START		(_VA_START(vabits_actual))
 
 #include <linux/bitops.h>
 #include <linux/mmdebug.h>
 
+extern s64			physvirt_offset;
 extern s64			memstart_addr;
 /* PHYS_OFFSET - the physical address of the start of memory. */
 #define PHYS_OFFSET		({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
@@ -240,9 +241,9 @@ static inline const void *__tag_set(const void *addr, u8 tag)
  * space. Testing the top bit for the start of the region is a
  * sufficient check.
  */
-#define __is_lm_address(addr)	(!((addr) & BIT(VA_BITS - 1)))
+#define __is_lm_address(addr)	(!((addr) & BIT(vabits_actual - 1)))
 
-#define __lm_to_phys(addr)	(((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
+#define __lm_to_phys(addr)	(((addr) + physvirt_offset))
 #define __kimg_to_phys(addr)	((addr) - kimage_voffset)
 
 #define __virt_to_phys_nodebug(x) ({					\
@@ -261,7 +262,7 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
 #define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
 #endif
 
-#define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
+#define __phys_to_virt(x)	((unsigned long)((x) - physvirt_offset))
 #define __phys_to_kimg(x)	((unsigned long)((x) + kimage_voffset))
 
 /*
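With the change above, __lm_to_phys() and __phys_to_virt() are exact inverses, one add and one subtract of physvirt_offset, and the linear-map check keys off the runtime bit width rather than VA_BITS. A minimal stand-alone sketch of the __is_lm_address() test, assuming vabits_actual == 48:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t vabits_actual = 48;	/* illustrative boot-time value */

/*
 * Mirrors __is_lm_address(): the linear map occupies the bottom half
 * of the kernel VA range, so bit (vabits_actual - 1) is clear for
 * linear-map addresses.
 */
static bool is_lm_address(uint64_t addr)
{
	return !(addr & (1ULL << (vabits_actual - 1)));
}

int main(void)
{
	/* Linear map for 48-bit VAs: bit 47 clear -> prints 1. */
	printf("%d\n", is_lm_address(0xffff000000001000ULL));
	/* Top half (kernel image/vmalloc): bit 47 set -> prints 0. */
	printf("%d\n", is_lm_address(0xffff800000000000ULL));
	return 0;
}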
arch/arm64/include/asm/mmu_context.h
@@ -95,7 +95,7 @@ static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
 	isb();
 }
 
-#define cpu_set_default_tcr_t0sz()	__cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS))
+#define cpu_set_default_tcr_t0sz()	__cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual))
 #define cpu_set_idmap_tcr_t0sz()	__cpu_set_tcr_t0sz(idmap_t0sz)
 
 /*
arch/arm64/kernel/head.S
@@ -321,6 +321,11 @@ __create_page_tables:
 	dmb	sy
 	dc	ivac, x6		// Invalidate potentially stale cache line
 
+	adr_l	x6, vabits_actual
+	str	x5, [x6]
+	dmb	sy
+	dc	ivac, x6		// Invalidate potentially stale cache line
+
 	/*
 	 * VA_BITS may be too small to allow for an ID mapping to be created
 	 * that covers system RAM if that is located sufficiently high in the
arch/arm64/kvm/va_layout.c
@@ -29,25 +29,25 @@ static void compute_layout(void)
 	int kva_msb;
 
 	/* Where is my RAM region? */
-	hyp_va_msb  = idmap_addr & BIT(VA_BITS - 1);
-	hyp_va_msb ^= BIT(VA_BITS - 1);
+	hyp_va_msb  = idmap_addr & BIT(vabits_actual - 1);
+	hyp_va_msb ^= BIT(vabits_actual - 1);
 
 	kva_msb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
 			(u64)(high_memory - 1));
 
-	if (kva_msb == (VA_BITS - 1)) {
+	if (kva_msb == (vabits_actual - 1)) {
 		/*
 		 * No space in the address, let's compute the mask so
-		 * that it covers (VA_BITS - 1) bits, and the region
+		 * that it covers (vabits_actual - 1) bits, and the region
 		 * bit. The tag stays set to zero.
 		 */
-		va_mask = BIT(VA_BITS - 1) - 1;
+		va_mask = BIT(vabits_actual - 1) - 1;
 		va_mask |= hyp_va_msb;
 	} else {
 		/*
 		 * We do have some free bits to insert a random tag.
 		 * Hyp VAs are now created from kernel linear map VAs
-		 * using the following formula (with V == VA_BITS):
+		 * using the following formula (with V == vabits_actual):
 		 *
 		 *  63 ... V |     V-1    | V-2 .. tag_lsb | tag_lsb - 1 .. 0
 		 *  ---------------------------------------------------------
@@ -55,7 +55,7 @@ static void compute_layout(void)
 	 */
 	tag_lsb = kva_msb;
 	va_mask = GENMASK_ULL(tag_lsb - 1, 0);
-	tag_val = get_random_long() & GENMASK_ULL(VA_BITS - 2, tag_lsb);
+	tag_val = get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
 	tag_val |= hyp_va_msb;
 	tag_val >>= tag_lsb;
 }
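A worked, stand-alone sketch of the else branch above, with assumed values (vabits_actual == 48, kva_msb == 40, and a fixed constant standing in for get_random_long()):

#include <stdint.h>
#include <stdio.h>

/* Same semantics as the kernel's GENMASK_ULL() and BIT(). */
#define GENMASK_ULL(h, l)	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define BIT(n)			(1ULL << (n))

int main(void)
{
	uint64_t vabits_actual = 48;		/* illustrative */
	uint64_t idmap_addr = 0x80000000;	/* illustrative RAM base */
	int kva_msb = 40;	/* pretend kernel VAs first differ at bit 40 */
	uint64_t hyp_va_msb, va_mask, tag_lsb, tag_val;

	/* Flip the region bit, exactly as compute_layout() does. */
	hyp_va_msb  = idmap_addr & BIT(vabits_actual - 1);
	hyp_va_msb ^= BIT(vabits_actual - 1);

	/* kva_msb < vabits_actual - 1: free bits exist for a random tag. */
	tag_lsb = kva_msb;
	va_mask = GENMASK_ULL(tag_lsb - 1, 0);
	tag_val = 0x123456789abcdef0ULL &	/* stand-in for get_random_long() */
		  GENMASK_ULL(vabits_actual - 2, tag_lsb);
	tag_val |= hyp_va_msb;
	tag_val >>= tag_lsb;

	printf("va_mask = 0x%016llx, tag_val = 0x%llx\n",
	       (unsigned long long)va_mask, (unsigned long long)tag_val);
	return 0;
}

With these inputs va_mask covers bits 0..39 and tag_val comes out as 0xd6, a tag that the hyp VA patching code later re-inserts at tag_lsb.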
arch/arm64/mm/fault.c
@@ -138,9 +138,9 @@ static void show_pte(unsigned long addr)
 		return;
 	}
 
-	pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp=%016lx\n",
+	pr_alert("%s pgtable: %luk pages, %llu-bit VAs, pgdp=%016lx\n",
 		 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
-		 mm == &init_mm ? VA_BITS : (int)vabits_user,
+		 mm == &init_mm ? vabits_actual : (int)vabits_user,
 		 (unsigned long)virt_to_phys(mm->pgd));
 	pgdp = pgd_offset(mm, addr);
 	pgd = READ_ONCE(*pgdp);
arch/arm64/mm/init.c
@@ -50,6 +50,9 @@
 s64 memstart_addr __ro_after_init = -1;
 EXPORT_SYMBOL(memstart_addr);
 
+s64 physvirt_offset __ro_after_init;
+EXPORT_SYMBOL(physvirt_offset);
+
 phys_addr_t arm64_dma_phys_limit __ro_after_init;
 
 #ifdef CONFIG_KEXEC_CORE
@@ -301,7 +304,7 @@ static void __init fdt_enforce_memory_region(void)
 
 void __init arm64_memblock_init(void)
 {
-	const s64 linear_region_size = BIT(VA_BITS - 1);
+	const s64 linear_region_size = BIT(vabits_actual - 1);
 
 	/* Handle linux,usable-memory-range property */
 	fdt_enforce_memory_region();
@@ -315,6 +318,8 @@ void __init arm64_memblock_init(void)
 	memstart_addr = round_down(memblock_start_of_DRAM(),
 				   ARM64_MEMSTART_ALIGN);
 
+	physvirt_offset = PHYS_OFFSET - PAGE_OFFSET;
+
 	/*
 	 * Remove the memory that we will not be able to cover with the
 	 * linear mapping. Take care not to clip the kernel which may be
arch/arm64/mm/mmu.c
@@ -43,6 +43,9 @@ u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
 u64 vabits_user __ro_after_init;
 EXPORT_SYMBOL(vabits_user);
 
+u64 __section(".mmuoff.data.write") vabits_actual;
+EXPORT_SYMBOL(vabits_actual);
+
 u64 kimage_voffset __ro_after_init;
 EXPORT_SYMBOL(kimage_voffset);
 