Merge branch 'for-next/cpufeature' into for-next/core
Support for overriding CPU ID register fields on the command-line, which
allows us to disable certain features which the kernel would otherwise
use unconditionally when detected.

* for-next/cpufeature: (22 commits)
  arm64: cpufeatures: Allow disabling of Pointer Auth from the command-line
  arm64: Defer enabling pointer authentication on boot core
  arm64: cpufeatures: Allow disabling of BTI from the command-line
  arm64: Move "nokaslr" over to the early cpufeature infrastructure
  KVM: arm64: Document HVC_VHE_RESTART stub hypercall
  arm64: Make kvm-arm.mode={nvhe, protected} an alias of id_aa64mmfr1.vh=0
  arm64: Add an aliasing facility for the idreg override
  arm64: Honor VHE being disabled from the command-line
  arm64: Allow ID_AA64MMFR1_EL1.VH to be overridden from the command line
  arm64: cpufeature: Add an early command-line cpufeature override facility
  arm64: Extract early FDT mapping from kaslr_early_init()
  arm64: cpufeature: Use IDreg override in __read_sysreg_by_encoding()
  arm64: cpufeature: Add global feature override facility
  arm64: Move SCTLR_EL1 initialisation to EL-agnostic code
  arm64: Simplify init_el2_state to be non-VHE only
  arm64: Move VHE-specific SPE setup to mutate_to_vhe()
  arm64: Drop early setting of MDSCR_EL2.TPMS
  arm64: Initialise as nVHE before switching to VHE
  arm64: Provide an 'upgrade to VHE' stub hypercall
  arm64: Turn the MMU-on sequence into a macro
  ...
commit 88ddf0df16
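For illustration only (not part of the commit itself): with this merge applied, any
subset of the new options can be combined on the kernel command line, e.g.:

    arm64.nobti arm64.nopauth id_aa64mmfr1.vh=0

where kvm-arm.mode=nvhe and kvm-arm.mode=protected are parsed as aliases of
id_aa64mmfr1.vh=0, and nokaslr becomes an alias of kaslr.disabled=1 (see the
aliases[] table in the new idreg-override.c below).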
@@ -373,6 +373,12 @@
 	arcrimi=	[HW,NET] ARCnet - "RIM I" (entirely mem-mapped) cards
 			Format: <io>,<irq>,<nodeID>
 
+	arm64.nobti	[ARM64] Unconditionally disable Branch Target
+			Identification support
+
+	arm64.nopauth	[ARM64] Unconditionally disable Pointer Authentication
+			support
+
 	ataflop=	[HW,M68k]
 
 	atarimouse=	[HW,MOUSE] Atari Mouse
@@ -2257,6 +2263,9 @@
 	kvm-arm.mode=
 			[KVM,ARM] Select one of KVM/arm64's modes of operation.
 
+			nvhe: Standard nVHE-based mode, without support for
+			protected guests.
+
 			protected: nVHE-based mode with support for guests whose
 			state is kept private from the host.
 			Not valid if the kernel is running in EL2.
@@ -58,6 +58,15 @@ these functions (see arch/arm{,64}/include/asm/virt.h):
   into place (arm64 only), and jump to the restart address while at HYP/EL2.
   This hypercall is not expected to return to its caller.
 
+* ::
+
+    x0 = HVC_VHE_RESTART (arm64 only)
+
+  Attempt to upgrade the kernel's exception level from EL1 to EL2 by enabling
+  the VHE mode. This is conditioned by the CPU supporting VHE, the EL2 MMU
+  being off, and VHE not being disabled by any other means (command line
+  option, for example).
+
 Any other value of r0/x0 triggers a hypervisor-specific handling,
 which is not documented here.
 
@@ -675,6 +675,23 @@ USER(\label, ic	ivau, \tmp2)	// invalidate I line PoU
 	.endif
 	.endm
 
+/*
+ * Set SCTLR_EL1 to the passed value, and invalidate the local icache
+ * in the process. This is called when setting the MMU on.
+ */
+	.macro set_sctlr_el1, reg
+	msr	sctlr_el1, \reg
+	isb
+	/*
+	 * Invalidate the local I-cache so that any instructions fetched
+	 * speculatively from the PoC are discarded, since they may have
+	 * been dynamically patched at the PoU.
+	 */
+	ic	iallu
+	dsb	nsh
+	isb
+	.endm
+
 /*
  * Check whether to yield to another runnable task from kernel mode NEON code
  * (which runs with preemption disabled).
@@ -63,6 +63,11 @@ struct arm64_ftr_bits {
 	s64		safe_val; /* safe value for FTR_EXACT features */
 };
 
+struct arm64_ftr_override {
+	u64		val;
+	u64		mask;
+};
+
 /*
  * @arm64_ftr_reg - Feature register
  * @strict_mask		Bits which should match across all CPUs for sanity.
@@ -74,6 +79,7 @@ struct arm64_ftr_reg {
 	u64				user_mask;
 	u64				sys_val;
 	u64				user_val;
+	struct arm64_ftr_override	*override;
 	const struct arm64_ftr_bits	*ftr_bits;
 };
 
@@ -600,6 +606,7 @@ void __init setup_cpu_features(void);
 void check_local_cpu_capabilities(void);
 
 u64 read_sanitised_ftr_reg(u32 id);
+u64 __read_sysreg_by_encoding(u32 sys_id);
 
 static inline bool cpu_supports_mixed_endian_el0(void)
 {
@@ -811,6 +818,10 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
 	return 8;
 }
 
+extern struct arm64_ftr_override id_aa64mmfr1_override;
+extern struct arm64_ftr_override id_aa64pfr1_override;
+extern struct arm64_ftr_override id_aa64isar1_override;
+
 u32 get_kvm_ipa_limit(void);
 void dump_cpu_features(void);
 
@@ -32,46 +32,39 @@
  * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
  * EL2.
  */
-.macro __init_el2_timers mode
-.ifeqs "\mode", "nvhe"
+.macro __init_el2_timers
 	mrs	x0, cnthctl_el2
 	orr	x0, x0, #3			// Enable EL1 physical timers
 	msr	cnthctl_el2, x0
-.endif
 	msr	cntvoff_el2, xzr		// Clear virtual offset
 .endm
 
-.macro __init_el2_debug mode
+.macro __init_el2_debug
 	mrs	x1, id_aa64dfr0_el1
 	sbfx	x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
 	cmp	x0, #1
-	b.lt	1f				// Skip if no PMU present
+	b.lt	.Lskip_pmu_\@			// Skip if no PMU present
 	mrs	x0, pmcr_el0			// Disable debug access traps
 	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
-1:
+.Lskip_pmu_\@:
 	csel	x2, xzr, x0, lt			// all PMU counters from EL1
 
 	/* Statistical profiling */
 	ubfx	x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
-	cbz	x0, 3f				// Skip if SPE not present
+	cbz	x0, .Lskip_spe_\@		// Skip if SPE not present
 
-.ifeqs "\mode", "nvhe"
 	mrs_s	x0, SYS_PMBIDR_EL1		// If SPE available at EL2,
 	and	x0, x0, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
-	cbnz	x0, 2f				// then permit sampling of physical
+	cbnz	x0, .Lskip_spe_el2_\@		// then permit sampling of physical
 	mov	x0, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
 		      1 << SYS_PMSCR_EL2_PA_SHIFT)
 	msr_s	SYS_PMSCR_EL2, x0		// addresses and physical counter
-2:
+.Lskip_spe_el2_\@:
 	mov	x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
 	orr	x2, x2, x0			// If we don't have VHE, then
 						// use EL1&0 translation.
-.else
-	orr	x2, x2, #MDCR_EL2_TPMS		// For VHE, use EL2 translation
-						// and disable access from EL1
-.endif
 
-3:
+.Lskip_spe_\@:
 	msr	mdcr_el2, x2			// Configure debug traps
 .endm
 
|
|||||||
.macro __init_el2_lor
|
.macro __init_el2_lor
|
||||||
mrs x1, id_aa64mmfr1_el1
|
mrs x1, id_aa64mmfr1_el1
|
||||||
ubfx x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4
|
ubfx x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4
|
||||||
cbz x0, 1f
|
cbz x0, .Lskip_lor_\@
|
||||||
msr_s SYS_LORC_EL1, xzr
|
msr_s SYS_LORC_EL1, xzr
|
||||||
1:
|
.Lskip_lor_\@:
|
||||||
.endm
|
.endm
|
||||||
|
|
||||||
/* Stage-2 translation */
|
/* Stage-2 translation */
|
||||||
@ -93,7 +86,7 @@
|
|||||||
.macro __init_el2_gicv3
|
.macro __init_el2_gicv3
|
||||||
mrs x0, id_aa64pfr0_el1
|
mrs x0, id_aa64pfr0_el1
|
||||||
ubfx x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4
|
ubfx x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4
|
||||||
cbz x0, 1f
|
cbz x0, .Lskip_gicv3_\@
|
||||||
|
|
||||||
mrs_s x0, SYS_ICC_SRE_EL2
|
mrs_s x0, SYS_ICC_SRE_EL2
|
||||||
orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1
|
orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1
|
||||||
@ -103,7 +96,7 @@
|
|||||||
mrs_s x0, SYS_ICC_SRE_EL2 // Read SRE back,
|
mrs_s x0, SYS_ICC_SRE_EL2 // Read SRE back,
|
||||||
tbz x0, #0, 1f // and check that it sticks
|
tbz x0, #0, 1f // and check that it sticks
|
||||||
msr_s SYS_ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults
|
msr_s SYS_ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults
|
||||||
1:
|
.Lskip_gicv3_\@:
|
||||||
.endm
|
.endm
|
||||||
|
|
||||||
.macro __init_el2_hstr
|
.macro __init_el2_hstr
|
||||||
@ -128,14 +121,14 @@
|
|||||||
.macro __init_el2_nvhe_sve
|
.macro __init_el2_nvhe_sve
|
||||||
mrs x1, id_aa64pfr0_el1
|
mrs x1, id_aa64pfr0_el1
|
||||||
ubfx x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4
|
ubfx x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4
|
||||||
cbz x1, 1f
|
cbz x1, .Lskip_sve_\@
|
||||||
|
|
||||||
bic x0, x0, #CPTR_EL2_TZ // Also disable SVE traps
|
bic x0, x0, #CPTR_EL2_TZ // Also disable SVE traps
|
||||||
msr cptr_el2, x0 // Disable copro. traps to EL2
|
msr cptr_el2, x0 // Disable copro. traps to EL2
|
||||||
isb
|
isb
|
||||||
mov x1, #ZCR_ELx_LEN_MASK // SVE: Enable full vector
|
mov x1, #ZCR_ELx_LEN_MASK // SVE: Enable full vector
|
||||||
msr_s SYS_ZCR_EL2, x1 // length for EL1.
|
msr_s SYS_ZCR_EL2, x1 // length for EL1.
|
||||||
1:
|
.Lskip_sve_\@:
|
||||||
.endm
|
.endm
|
||||||
|
|
||||||
.macro __init_el2_nvhe_prepare_eret
|
.macro __init_el2_nvhe_prepare_eret
|
||||||
@ -145,37 +138,24 @@
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* Initialize EL2 registers to sane values. This should be called early on all
|
* Initialize EL2 registers to sane values. This should be called early on all
|
||||||
* cores that were booted in EL2.
|
* cores that were booted in EL2. Note that everything gets initialised as
|
||||||
|
* if VHE was not evailable. The kernel context will be upgraded to VHE
|
||||||
|
* if possible later on in the boot process
|
||||||
*
|
*
|
||||||
* Regs: x0, x1 and x2 are clobbered.
|
* Regs: x0, x1 and x2 are clobbered.
|
||||||
*/
|
*/
|
||||||
.macro init_el2_state mode
|
.macro init_el2_state
|
||||||
.ifnes "\mode", "vhe"
|
|
||||||
.ifnes "\mode", "nvhe"
|
|
||||||
.error "Invalid 'mode' argument"
|
|
||||||
.endif
|
|
||||||
.endif
|
|
||||||
|
|
||||||
__init_el2_sctlr
|
__init_el2_sctlr
|
||||||
__init_el2_timers \mode
|
__init_el2_timers
|
||||||
__init_el2_debug \mode
|
__init_el2_debug
|
||||||
__init_el2_lor
|
__init_el2_lor
|
||||||
__init_el2_stage2
|
__init_el2_stage2
|
||||||
__init_el2_gicv3
|
__init_el2_gicv3
|
||||||
__init_el2_hstr
|
__init_el2_hstr
|
||||||
|
|
||||||
/*
|
|
||||||
* When VHE is not in use, early init of EL2 needs to be done here.
|
|
||||||
* When VHE _is_ in use, EL1 will not be used in the host and
|
|
||||||
* requires no configuration, and all non-hyp-specific EL2 setup
|
|
||||||
* will be done via the _EL1 system register aliases in __cpu_setup.
|
|
||||||
*/
|
|
||||||
.ifeqs "\mode", "nvhe"
|
|
||||||
__init_el2_nvhe_idregs
|
__init_el2_nvhe_idregs
|
||||||
__init_el2_nvhe_cptr
|
__init_el2_nvhe_cptr
|
||||||
__init_el2_nvhe_sve
|
__init_el2_nvhe_sve
|
||||||
__init_el2_nvhe_prepare_eret
|
__init_el2_nvhe_prepare_eret
|
||||||
.endif
|
|
||||||
.endm
|
.endm
|
||||||
|
|
||||||
#endif /* __ARM_KVM_INIT_H__ */
|
#endif /* __ARM_KVM_INIT_H__ */
|
||||||
|
@@ -76,6 +76,15 @@ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
 	return ptrauth_clear_pac(ptr);
 }
 
+static __always_inline void ptrauth_enable(void)
+{
+	if (!system_supports_address_auth())
+		return;
+	sysreg_clear_set(sctlr_el1, 0, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
+					SCTLR_ELx_ENDA | SCTLR_ELx_ENDB));
+	isb();
+}
+
 #define ptrauth_thread_init_user(tsk)	\
 	ptrauth_keys_init_user(&(tsk)->thread.keys_user)
 #define ptrauth_thread_init_kernel(tsk)	\
|
|||||||
ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel)
|
ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel)
|
||||||
|
|
||||||
#else /* CONFIG_ARM64_PTR_AUTH */
|
#else /* CONFIG_ARM64_PTR_AUTH */
|
||||||
|
#define ptrauth_enable()
|
||||||
#define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL)
|
#define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL)
|
||||||
#define ptrauth_strip_insn_pac(lr) (lr)
|
#define ptrauth_strip_insn_pac(lr) (lr)
|
||||||
#define ptrauth_thread_init_user(tsk)
|
#define ptrauth_thread_init_user(tsk)
|
||||||
|
arch/arm64/include/asm/setup.h (new file, 11 lines)
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef __ARM64_ASM_SETUP_H
+#define __ARM64_ASM_SETUP_H
+
+#include <uapi/asm/setup.h>
+
+void *get_early_fdt_ptr(void);
+void early_fdt_map(u64 dt_phys);
+
+#endif
@@ -41,6 +41,7 @@ static __always_inline void boot_init_stack_canary(void)
 #endif
 	ptrauth_thread_init_kernel(current);
 	ptrauth_thread_switch_kernel(current);
+	ptrauth_enable();
 }
 
 #endif	/* _ASM_STACKPROTECTOR_H */
@@ -35,8 +35,13 @@
  */
 #define HVC_RESET_VECTORS 2
 
+/*
+ * HVC_VHE_RESTART - Upgrade the CPU from EL1 to EL2, if possible
+ */
+#define HVC_VHE_RESTART	3
+
 /* Max number of HYP stub hypercalls */
-#define HVC_STUB_HCALL_NR 3
+#define HVC_STUB_HCALL_NR 4
 
 /* Error returned when an invalid stub number is passed into x0 */
 #define HVC_STUB_ERR	0xbadca11
@@ -17,7 +17,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
 			   return_address.o cpuinfo.o cpu_errata.o	\
 			   cpufeature.o alternative.o cacheinfo.o	\
 			   smp.o smp_spin_table.o topology.o smccc-call.o \
-			   syscall.o proton-pack.o
+			   syscall.o proton-pack.o idreg-override.o
 
 targets += efi-entry.o
 
@@ -99,6 +99,9 @@ int main(void)
   DEFINE(CPU_BOOT_STACK,	offsetof(struct secondary_data, stack));
   DEFINE(CPU_BOOT_TASK,		offsetof(struct secondary_data, task));
   BLANK();
+  DEFINE(FTR_OVR_VAL_OFFSET,	offsetof(struct arm64_ftr_override, val));
+  DEFINE(FTR_OVR_MASK_OFFSET,	offsetof(struct arm64_ftr_override, mask));
+  BLANK();
 #ifdef CONFIG_KVM
   DEFINE(VCPU_CONTEXT,		offsetof(struct kvm_vcpu, arch.ctxt));
   DEFINE(VCPU_FAULT_DISR,	offsetof(struct kvm_vcpu, arch.fault.disr_el1));
@@ -352,9 +352,12 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
 	ARM64_FTR_END,
 };
 
+static struct arm64_ftr_override __ro_after_init no_override = { };
+
 struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
 	.name		= "SYS_CTR_EL0",
-	.ftr_bits	= ftr_ctr
+	.ftr_bits	= ftr_ctr,
+	.override	= &no_override,
 };
 
 static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
|
|||||||
ARM64_FTR_END,
|
ARM64_FTR_END,
|
||||||
};
|
};
|
||||||
|
|
||||||
#define ARM64_FTR_REG(id, table) { \
|
#define ARM64_FTR_REG_OVERRIDE(id, table, ovr) { \
|
||||||
.sys_id = id, \
|
.sys_id = id, \
|
||||||
.reg = &(struct arm64_ftr_reg){ \
|
.reg = &(struct arm64_ftr_reg){ \
|
||||||
.name = #id, \
|
.name = #id, \
|
||||||
|
.override = (ovr), \
|
||||||
.ftr_bits = &((table)[0]), \
|
.ftr_bits = &((table)[0]), \
|
||||||
}}
|
}}
|
||||||
|
|
||||||
|
#define ARM64_FTR_REG(id, table) ARM64_FTR_REG_OVERRIDE(id, table, &no_override)
|
||||||
|
|
||||||
|
struct arm64_ftr_override __ro_after_init id_aa64mmfr1_override;
|
||||||
|
struct arm64_ftr_override __ro_after_init id_aa64pfr1_override;
|
||||||
|
struct arm64_ftr_override __ro_after_init id_aa64isar1_override;
|
||||||
|
|
||||||
static const struct __ftr_reg_entry {
|
static const struct __ftr_reg_entry {
|
||||||
u32 sys_id;
|
u32 sys_id;
|
||||||
struct arm64_ftr_reg *reg;
|
struct arm64_ftr_reg *reg;
|
||||||
@ -585,7 +595,8 @@ static const struct __ftr_reg_entry {
|
|||||||
|
|
||||||
/* Op1 = 0, CRn = 0, CRm = 4 */
|
/* Op1 = 0, CRn = 0, CRm = 4 */
|
||||||
ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
|
ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
|
||||||
ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
|
ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1,
|
||||||
|
&id_aa64pfr1_override),
|
||||||
ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0),
|
ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0),
|
||||||
|
|
||||||
/* Op1 = 0, CRn = 0, CRm = 5 */
|
/* Op1 = 0, CRn = 0, CRm = 5 */
|
||||||
@ -594,11 +605,13 @@ static const struct __ftr_reg_entry {
|
|||||||
|
|
||||||
/* Op1 = 0, CRn = 0, CRm = 6 */
|
/* Op1 = 0, CRn = 0, CRm = 6 */
|
||||||
ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
|
ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
|
||||||
ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),
|
ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1,
|
||||||
|
&id_aa64isar1_override),
|
||||||
|
|
||||||
/* Op1 = 0, CRn = 0, CRm = 7 */
|
/* Op1 = 0, CRn = 0, CRm = 7 */
|
||||||
ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
|
ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
|
||||||
ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
|
ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1,
|
||||||
|
&id_aa64mmfr1_override),
|
||||||
ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
|
ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
|
||||||
|
|
||||||
/* Op1 = 0, CRn = 1, CRm = 2 */
|
/* Op1 = 0, CRn = 1, CRm = 2 */
|
||||||
@ -770,6 +783,33 @@ static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
|
|||||||
for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
|
for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
|
||||||
u64 ftr_mask = arm64_ftr_mask(ftrp);
|
u64 ftr_mask = arm64_ftr_mask(ftrp);
|
||||||
s64 ftr_new = arm64_ftr_value(ftrp, new);
|
s64 ftr_new = arm64_ftr_value(ftrp, new);
|
||||||
|
s64 ftr_ovr = arm64_ftr_value(ftrp, reg->override->val);
|
||||||
|
|
||||||
|
if ((ftr_mask & reg->override->mask) == ftr_mask) {
|
||||||
|
s64 tmp = arm64_ftr_safe_value(ftrp, ftr_ovr, ftr_new);
|
||||||
|
char *str = NULL;
|
||||||
|
|
||||||
|
if (ftr_ovr != tmp) {
|
||||||
|
/* Unsafe, remove the override */
|
||||||
|
reg->override->mask &= ~ftr_mask;
|
||||||
|
reg->override->val &= ~ftr_mask;
|
||||||
|
tmp = ftr_ovr;
|
||||||
|
str = "ignoring override";
|
||||||
|
} else if (ftr_new != tmp) {
|
||||||
|
/* Override was valid */
|
||||||
|
ftr_new = tmp;
|
||||||
|
str = "forced";
|
||||||
|
} else if (ftr_ovr == tmp) {
|
||||||
|
/* Override was the safe value */
|
||||||
|
str = "already set";
|
||||||
|
}
|
||||||
|
|
||||||
|
if (str)
|
||||||
|
pr_warn("%s[%d:%d]: %s to %llx\n",
|
||||||
|
reg->name,
|
||||||
|
ftrp->shift + ftrp->width - 1,
|
||||||
|
ftrp->shift, str, tmp);
|
||||||
|
}
|
||||||
|
|
||||||
val = arm64_ftr_set_value(ftrp, val, ftr_new);
|
val = arm64_ftr_set_value(ftrp, val, ftr_new);
|
||||||
|
|
||||||
@ -1115,14 +1155,17 @@ u64 read_sanitised_ftr_reg(u32 id)
|
|||||||
EXPORT_SYMBOL_GPL(read_sanitised_ftr_reg);
|
EXPORT_SYMBOL_GPL(read_sanitised_ftr_reg);
|
||||||
|
|
||||||
#define read_sysreg_case(r) \
|
#define read_sysreg_case(r) \
|
||||||
case r: return read_sysreg_s(r)
|
case r: val = read_sysreg_s(r); break;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated.
|
* __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated.
|
||||||
* Read the system register on the current CPU
|
* Read the system register on the current CPU
|
||||||
*/
|
*/
|
||||||
static u64 __read_sysreg_by_encoding(u32 sys_id)
|
u64 __read_sysreg_by_encoding(u32 sys_id)
|
||||||
{
|
{
|
||||||
|
struct arm64_ftr_reg *regp;
|
||||||
|
u64 val;
|
||||||
|
|
||||||
switch (sys_id) {
|
switch (sys_id) {
|
||||||
read_sysreg_case(SYS_ID_PFR0_EL1);
|
read_sysreg_case(SYS_ID_PFR0_EL1);
|
||||||
read_sysreg_case(SYS_ID_PFR1_EL1);
|
read_sysreg_case(SYS_ID_PFR1_EL1);
|
||||||
@ -1165,6 +1208,14 @@ static u64 __read_sysreg_by_encoding(u32 sys_id)
|
|||||||
BUG();
|
BUG();
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
regp = get_arm64_ftr_reg(sys_id);
|
||||||
|
if (regp) {
|
||||||
|
val &= ~regp->override->mask;
|
||||||
|
val |= (regp->override->val & regp->override->mask);
|
||||||
|
}
|
||||||
|
|
||||||
|
return val;
|
||||||
}
|
}
|
||||||
|
|
||||||
#include <linux/irqchip/arm-gic-v3.h>
|
#include <linux/irqchip/arm-gic-v3.h>
|
||||||
|
@ -404,10 +404,6 @@ SYM_FUNC_START_LOCAL(__primary_switched)
|
|||||||
adr_l x5, init_task
|
adr_l x5, init_task
|
||||||
msr sp_el0, x5 // Save thread_info
|
msr sp_el0, x5 // Save thread_info
|
||||||
|
|
||||||
#ifdef CONFIG_ARM64_PTR_AUTH
|
|
||||||
__ptrauth_keys_init_cpu x5, x6, x7, x8
|
|
||||||
#endif
|
|
||||||
|
|
||||||
adr_l x8, vectors // load VBAR_EL1 with virtual
|
adr_l x8, vectors // load VBAR_EL1 with virtual
|
||||||
msr vbar_el1, x8 // vector table address
|
msr vbar_el1, x8 // vector table address
|
||||||
isb
|
isb
|
||||||
@ -436,10 +432,12 @@ SYM_FUNC_START_LOCAL(__primary_switched)
|
|||||||
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
|
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
|
||||||
bl kasan_early_init
|
bl kasan_early_init
|
||||||
#endif
|
#endif
|
||||||
|
mov x0, x21 // pass FDT address in x0
|
||||||
|
bl early_fdt_map // Try mapping the FDT early
|
||||||
|
bl init_feature_override // Parse cpu feature overrides
|
||||||
#ifdef CONFIG_RANDOMIZE_BASE
|
#ifdef CONFIG_RANDOMIZE_BASE
|
||||||
tst x23, ~(MIN_KIMG_ALIGN - 1) // already running randomized?
|
tst x23, ~(MIN_KIMG_ALIGN - 1) // already running randomized?
|
||||||
b.ne 0f
|
b.ne 0f
|
||||||
mov x0, x21 // pass FDT address in x0
|
|
||||||
bl kaslr_early_init // parse FDT for KASLR options
|
bl kaslr_early_init // parse FDT for KASLR options
|
||||||
cbz x0, 0f // KASLR disabled? just proceed
|
cbz x0, 0f // KASLR disabled? just proceed
|
||||||
orr x23, x23, x0 // record KASLR offset
|
orr x23, x23, x0 // record KASLR offset
|
||||||
@ -447,6 +445,7 @@ SYM_FUNC_START_LOCAL(__primary_switched)
|
|||||||
ret // to __primary_switch()
|
ret // to __primary_switch()
|
||||||
0:
|
0:
|
||||||
#endif
|
#endif
|
||||||
|
bl switch_to_vhe // Prefer VHE if possible
|
||||||
add sp, sp, #16
|
add sp, sp, #16
|
||||||
mov x29, #0
|
mov x29, #0
|
||||||
mov x30, #0
|
mov x30, #0
|
||||||
@ -478,13 +477,14 @@ EXPORT_SYMBOL(kimage_vaddr)
|
|||||||
* booted in EL1 or EL2 respectively.
|
* booted in EL1 or EL2 respectively.
|
||||||
*/
|
*/
|
||||||
SYM_FUNC_START(init_kernel_el)
|
SYM_FUNC_START(init_kernel_el)
|
||||||
|
mov_q x0, INIT_SCTLR_EL1_MMU_OFF
|
||||||
|
msr sctlr_el1, x0
|
||||||
|
|
||||||
mrs x0, CurrentEL
|
mrs x0, CurrentEL
|
||||||
cmp x0, #CurrentEL_EL2
|
cmp x0, #CurrentEL_EL2
|
||||||
b.eq init_el2
|
b.eq init_el2
|
||||||
|
|
||||||
SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
|
SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
|
||||||
mov_q x0, INIT_SCTLR_EL1_MMU_OFF
|
|
||||||
msr sctlr_el1, x0
|
|
||||||
isb
|
isb
|
||||||
mov_q x0, INIT_PSTATE_EL1
|
mov_q x0, INIT_PSTATE_EL1
|
||||||
msr spsr_el1, x0
|
msr spsr_el1, x0
|
||||||
@ -493,50 +493,11 @@ SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
|
|||||||
eret
|
eret
|
||||||
|
|
||||||
SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
|
SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
|
||||||
#ifdef CONFIG_ARM64_VHE
|
|
||||||
/*
|
|
||||||
* Check for VHE being present. x2 being non-zero indicates that we
|
|
||||||
* do have VHE, and that the kernel is intended to run at EL2.
|
|
||||||
*/
|
|
||||||
mrs x2, id_aa64mmfr1_el1
|
|
||||||
ubfx x2, x2, #ID_AA64MMFR1_VHE_SHIFT, #4
|
|
||||||
#else
|
|
||||||
mov x2, xzr
|
|
||||||
#endif
|
|
||||||
cbz x2, init_el2_nvhe
|
|
||||||
|
|
||||||
/*
|
|
||||||
* When VHE _is_ in use, EL1 will not be used in the host and
|
|
||||||
* requires no configuration, and all non-hyp-specific EL2 setup
|
|
||||||
* will be done via the _EL1 system register aliases in __cpu_setup.
|
|
||||||
*/
|
|
||||||
mov_q x0, HCR_HOST_VHE_FLAGS
|
|
||||||
msr hcr_el2, x0
|
|
||||||
isb
|
|
||||||
|
|
||||||
init_el2_state vhe
|
|
||||||
|
|
||||||
isb
|
|
||||||
|
|
||||||
mov_q x0, INIT_PSTATE_EL2
|
|
||||||
msr spsr_el2, x0
|
|
||||||
msr elr_el2, lr
|
|
||||||
mov w0, #BOOT_CPU_MODE_EL2
|
|
||||||
eret
|
|
||||||
|
|
||||||
SYM_INNER_LABEL(init_el2_nvhe, SYM_L_LOCAL)
|
|
||||||
/*
|
|
||||||
* When VHE is not in use, early init of EL2 and EL1 needs to be
|
|
||||||
* done here.
|
|
||||||
*/
|
|
||||||
mov_q x0, INIT_SCTLR_EL1_MMU_OFF
|
|
||||||
msr sctlr_el1, x0
|
|
||||||
|
|
||||||
mov_q x0, HCR_HOST_NVHE_FLAGS
|
mov_q x0, HCR_HOST_NVHE_FLAGS
|
||||||
msr hcr_el2, x0
|
msr hcr_el2, x0
|
||||||
isb
|
isb
|
||||||
|
|
||||||
init_el2_state nvhe
|
init_el2_state
|
||||||
|
|
||||||
/* Hypervisor stub */
|
/* Hypervisor stub */
|
||||||
adr_l x0, __hyp_stub_vectors
|
adr_l x0, __hyp_stub_vectors
|
||||||
@ -623,6 +584,7 @@ SYM_FUNC_START_LOCAL(secondary_startup)
|
|||||||
/*
|
/*
|
||||||
* Common entry point for secondary CPUs.
|
* Common entry point for secondary CPUs.
|
||||||
*/
|
*/
|
||||||
|
bl switch_to_vhe
|
||||||
bl __cpu_secondary_check52bitva
|
bl __cpu_secondary_check52bitva
|
||||||
bl __cpu_setup // initialise processor
|
bl __cpu_setup // initialise processor
|
||||||
adrp x1, swapper_pg_dir
|
adrp x1, swapper_pg_dir
|
||||||
@ -703,16 +665,9 @@ SYM_FUNC_START(__enable_mmu)
|
|||||||
offset_ttbr1 x1, x3
|
offset_ttbr1 x1, x3
|
||||||
msr ttbr1_el1, x1 // load TTBR1
|
msr ttbr1_el1, x1 // load TTBR1
|
||||||
isb
|
isb
|
||||||
msr sctlr_el1, x0
|
|
||||||
isb
|
set_sctlr_el1 x0
|
||||||
/*
|
|
||||||
* Invalidate the local I-cache so that any instructions fetched
|
|
||||||
* speculatively from the PoC are discarded, since they may have
|
|
||||||
* been dynamically patched at the PoU.
|
|
||||||
*/
|
|
||||||
ic iallu
|
|
||||||
dsb nsh
|
|
||||||
isb
|
|
||||||
ret
|
ret
|
||||||
SYM_FUNC_END(__enable_mmu)
|
SYM_FUNC_END(__enable_mmu)
|
||||||
|
|
||||||
@ -883,11 +838,7 @@ SYM_FUNC_START_LOCAL(__primary_switch)
|
|||||||
tlbi vmalle1 // Remove any stale TLB entries
|
tlbi vmalle1 // Remove any stale TLB entries
|
||||||
dsb nsh
|
dsb nsh
|
||||||
|
|
||||||
msr sctlr_el1, x19 // re-enable the MMU
|
set_sctlr_el1 x19 // re-enable the MMU
|
||||||
isb
|
|
||||||
ic iallu // flush instructions fetched
|
|
||||||
dsb nsh // via old mapping
|
|
||||||
isb
|
|
||||||
|
|
||||||
bl __relocate_kernel
|
bl __relocate_kernel
|
||||||
#endif
|
#endif
|
||||||
|
@ -8,9 +8,9 @@
|
|||||||
|
|
||||||
#include <linux/init.h>
|
#include <linux/init.h>
|
||||||
#include <linux/linkage.h>
|
#include <linux/linkage.h>
|
||||||
#include <linux/irqchip/arm-gic-v3.h>
|
|
||||||
|
|
||||||
#include <asm/assembler.h>
|
#include <asm/assembler.h>
|
||||||
|
#include <asm/el2_setup.h>
|
||||||
#include <asm/kvm_arm.h>
|
#include <asm/kvm_arm.h>
|
||||||
#include <asm/kvm_asm.h>
|
#include <asm/kvm_asm.h>
|
||||||
#include <asm/ptrace.h>
|
#include <asm/ptrace.h>
|
||||||
@ -47,10 +47,13 @@ SYM_CODE_END(__hyp_stub_vectors)
|
|||||||
|
|
||||||
SYM_CODE_START_LOCAL(el1_sync)
|
SYM_CODE_START_LOCAL(el1_sync)
|
||||||
cmp x0, #HVC_SET_VECTORS
|
cmp x0, #HVC_SET_VECTORS
|
||||||
b.ne 2f
|
b.ne 1f
|
||||||
msr vbar_el2, x1
|
msr vbar_el2, x1
|
||||||
b 9f
|
b 9f
|
||||||
|
|
||||||
|
1: cmp x0, #HVC_VHE_RESTART
|
||||||
|
b.eq mutate_to_vhe
|
||||||
|
|
||||||
2: cmp x0, #HVC_SOFT_RESTART
|
2: cmp x0, #HVC_SOFT_RESTART
|
||||||
b.ne 3f
|
b.ne 3f
|
||||||
mov x0, x2
|
mov x0, x2
|
||||||
@ -70,6 +73,88 @@ SYM_CODE_START_LOCAL(el1_sync)
|
|||||||
eret
|
eret
|
||||||
SYM_CODE_END(el1_sync)
|
SYM_CODE_END(el1_sync)
|
||||||
|
|
||||||
|
// nVHE? No way! Give me the real thing!
|
||||||
|
SYM_CODE_START_LOCAL(mutate_to_vhe)
|
||||||
|
// Be prepared to fail
|
||||||
|
mov_q x0, HVC_STUB_ERR
|
||||||
|
|
||||||
|
// Sanity check: MMU *must* be off
|
||||||
|
mrs x1, sctlr_el2
|
||||||
|
tbnz x1, #0, 1f
|
||||||
|
|
||||||
|
// Needs to be VHE capable, obviously
|
||||||
|
mrs x1, id_aa64mmfr1_el1
|
||||||
|
ubfx x1, x1, #ID_AA64MMFR1_VHE_SHIFT, #4
|
||||||
|
cbz x1, 1f
|
||||||
|
|
||||||
|
// Check whether VHE is disabled from the command line
|
||||||
|
adr_l x1, id_aa64mmfr1_override
|
||||||
|
ldr x2, [x1, FTR_OVR_VAL_OFFSET]
|
||||||
|
ldr x1, [x1, FTR_OVR_MASK_OFFSET]
|
||||||
|
ubfx x2, x2, #ID_AA64MMFR1_VHE_SHIFT, #4
|
||||||
|
ubfx x1, x1, #ID_AA64MMFR1_VHE_SHIFT, #4
|
||||||
|
cmp x1, xzr
|
||||||
|
and x2, x2, x1
|
||||||
|
csinv x2, x2, xzr, ne
|
||||||
|
cbz x2, 1f
|
||||||
|
|
||||||
|
// Engage the VHE magic!
|
||||||
|
mov_q x0, HCR_HOST_VHE_FLAGS
|
||||||
|
msr hcr_el2, x0
|
||||||
|
isb
|
||||||
|
|
||||||
|
// Use the EL1 allocated stack, per-cpu offset
|
||||||
|
mrs x0, sp_el1
|
||||||
|
mov sp, x0
|
||||||
|
mrs x0, tpidr_el1
|
||||||
|
msr tpidr_el2, x0
|
||||||
|
|
||||||
|
// FP configuration, vectors
|
||||||
|
mrs_s x0, SYS_CPACR_EL12
|
||||||
|
msr cpacr_el1, x0
|
||||||
|
mrs_s x0, SYS_VBAR_EL12
|
||||||
|
msr vbar_el1, x0
|
||||||
|
|
||||||
|
// Use EL2 translations for SPE and disable access from EL1
|
||||||
|
mrs x0, mdcr_el2
|
||||||
|
bic x0, x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
|
||||||
|
msr mdcr_el2, x0
|
||||||
|
|
||||||
|
// Transfer the MM state from EL1 to EL2
|
||||||
|
mrs_s x0, SYS_TCR_EL12
|
||||||
|
msr tcr_el1, x0
|
||||||
|
mrs_s x0, SYS_TTBR0_EL12
|
||||||
|
msr ttbr0_el1, x0
|
||||||
|
mrs_s x0, SYS_TTBR1_EL12
|
||||||
|
msr ttbr1_el1, x0
|
||||||
|
mrs_s x0, SYS_MAIR_EL12
|
||||||
|
msr mair_el1, x0
|
||||||
|
isb
|
||||||
|
|
||||||
|
// Invalidate TLBs before enabling the MMU
|
||||||
|
tlbi vmalle1
|
||||||
|
dsb nsh
|
||||||
|
|
||||||
|
// Enable the EL2 S1 MMU, as set up from EL1
|
||||||
|
mrs_s x0, SYS_SCTLR_EL12
|
||||||
|
set_sctlr_el1 x0
|
||||||
|
|
||||||
|
// Disable the EL1 S1 MMU for a good measure
|
||||||
|
mov_q x0, INIT_SCTLR_EL1_MMU_OFF
|
||||||
|
msr_s SYS_SCTLR_EL12, x0
|
||||||
|
|
||||||
|
// Hack the exception return to stay at EL2
|
||||||
|
mrs x0, spsr_el1
|
||||||
|
and x0, x0, #~PSR_MODE_MASK
|
||||||
|
mov x1, #PSR_MODE_EL2h
|
||||||
|
orr x0, x0, x1
|
||||||
|
msr spsr_el1, x0
|
||||||
|
|
||||||
|
mov x0, xzr
|
||||||
|
|
||||||
|
1: eret
|
||||||
|
SYM_CODE_END(mutate_to_vhe)
|
||||||
|
|
||||||
.macro invalid_vector label
|
.macro invalid_vector label
|
||||||
SYM_CODE_START_LOCAL(\label)
|
SYM_CODE_START_LOCAL(\label)
|
||||||
b \label
|
b \label
|
||||||
@ -118,3 +203,27 @@ SYM_FUNC_START(__hyp_reset_vectors)
|
|||||||
hvc #0
|
hvc #0
|
||||||
ret
|
ret
|
||||||
SYM_FUNC_END(__hyp_reset_vectors)
|
SYM_FUNC_END(__hyp_reset_vectors)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Entry point to switch to VHE if deemed capable
|
||||||
|
*/
|
||||||
|
SYM_FUNC_START(switch_to_vhe)
|
||||||
|
#ifdef CONFIG_ARM64_VHE
|
||||||
|
// Need to have booted at EL2
|
||||||
|
adr_l x1, __boot_cpu_mode
|
||||||
|
ldr w0, [x1]
|
||||||
|
cmp w0, #BOOT_CPU_MODE_EL2
|
||||||
|
b.ne 1f
|
||||||
|
|
||||||
|
// and still be at EL1
|
||||||
|
mrs x0, CurrentEL
|
||||||
|
cmp x0, #CurrentEL_EL1
|
||||||
|
b.ne 1f
|
||||||
|
|
||||||
|
// Turn the world upside down
|
||||||
|
mov x0, #HVC_VHE_RESTART
|
||||||
|
hvc #0
|
||||||
|
1:
|
||||||
|
#endif
|
||||||
|
ret
|
||||||
|
SYM_FUNC_END(switch_to_vhe)
|
||||||
|
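Aside (not part of the commit): the override machinery visible in the cpufeature.c
hunks above is, at its core, a masked merge — bits claimed by the override mask come
from the override value, everything else from the live register. A standalone sketch
in plain C, with illustrative names only:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors struct arm64_ftr_override: 'mask' selects the bits being forced. */
    struct ftr_override {
    	uint64_t val;
    	uint64_t mask;
    };

    /* The masked merge done at the end of __read_sysreg_by_encoding(). */
    static uint64_t apply_override(uint64_t hw, const struct ftr_override *o)
    {
    	hw &= ~o->mask;         /* drop the overridden fields */
    	hw |= o->val & o->mask; /* substitute the forced values */
    	return hw;
    }

    int main(void)
    {
    	/* Pretend VH (bits [11:8]) reads as 1 but was forced to 0. */
    	struct ftr_override o = { .val = 0x0, .mask = 0xf00 };

    	printf("%#llx\n", (unsigned long long)apply_override(0x100, &o)); /* 0 */
    	return 0;
    }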
arch/arm64/kernel/idreg-override.c (new file, 216 lines)
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Early cpufeature override framework
+ *
+ * Copyright (C) 2020 Google LLC
+ * Author: Marc Zyngier <maz@kernel.org>
+ */
+
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/libfdt.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
+#include <asm/setup.h>
+
+#define FTR_DESC_NAME_LEN	20
+#define FTR_DESC_FIELD_LEN	10
+#define FTR_ALIAS_NAME_LEN	30
+#define FTR_ALIAS_OPTION_LEN	80
+
+struct ftr_set_desc {
+	char				name[FTR_DESC_NAME_LEN];
+	struct arm64_ftr_override	*override;
+	struct {
+		char			name[FTR_DESC_FIELD_LEN];
+		u8			shift;
+	}				fields[];
+};
+
+static const struct ftr_set_desc mmfr1 __initconst = {
+	.name		= "id_aa64mmfr1",
+	.override	= &id_aa64mmfr1_override,
+	.fields		= {
+		{ "vh", ID_AA64MMFR1_VHE_SHIFT },
+		{}
+	},
+};
+
+static const struct ftr_set_desc pfr1 __initconst = {
+	.name		= "id_aa64pfr1",
+	.override	= &id_aa64pfr1_override,
+	.fields		= {
+		{ "bt", ID_AA64PFR1_BT_SHIFT },
+		{}
+	},
+};
+
+static const struct ftr_set_desc isar1 __initconst = {
+	.name		= "id_aa64isar1",
+	.override	= &id_aa64isar1_override,
+	.fields		= {
+		{ "gpi", ID_AA64ISAR1_GPI_SHIFT },
+		{ "gpa", ID_AA64ISAR1_GPA_SHIFT },
+		{ "api", ID_AA64ISAR1_API_SHIFT },
+		{ "apa", ID_AA64ISAR1_APA_SHIFT },
+		{}
+	},
+};
+
+extern struct arm64_ftr_override kaslr_feature_override;
+
+static const struct ftr_set_desc kaslr __initconst = {
+	.name		= "kaslr",
+#ifdef CONFIG_RANDOMIZE_BASE
+	.override	= &kaslr_feature_override,
+#endif
+	.fields		= {
+		{ "disabled", 0 },
+		{}
+	},
+};
+
+static const struct ftr_set_desc * const regs[] __initconst = {
+	&mmfr1,
+	&pfr1,
+	&isar1,
+	&kaslr,
+};
+
+static const struct {
+	char	alias[FTR_ALIAS_NAME_LEN];
+	char	feature[FTR_ALIAS_OPTION_LEN];
+} aliases[] __initconst = {
+	{ "kvm-arm.mode=nvhe",		"id_aa64mmfr1.vh=0" },
+	{ "kvm-arm.mode=protected",	"id_aa64mmfr1.vh=0" },
+	{ "arm64.nobti",		"id_aa64pfr1.bt=0" },
+	{ "arm64.nopauth",
+	  "id_aa64isar1.gpi=0 id_aa64isar1.gpa=0 "
+	  "id_aa64isar1.api=0 id_aa64isar1.apa=0" },
+	{ "nokaslr",			"kaslr.disabled=1" },
+};
+
+static int __init find_field(const char *cmdline,
+			     const struct ftr_set_desc *reg, int f, u64 *v)
+{
+	char opt[FTR_DESC_NAME_LEN + FTR_DESC_FIELD_LEN + 2];
+	int len;
+
+	len = snprintf(opt, ARRAY_SIZE(opt), "%s.%s=",
+		       reg->name, reg->fields[f].name);
+
+	if (!parameqn(cmdline, opt, len))
+		return -1;
+
+	return kstrtou64(cmdline + len, 0, v);
+}
+
+static void __init match_options(const char *cmdline)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(regs); i++) {
+		int f;
+
+		if (!regs[i]->override)
+			continue;
+
+		for (f = 0; strlen(regs[i]->fields[f].name); f++) {
+			u64 shift = regs[i]->fields[f].shift;
+			u64 mask = 0xfUL << shift;
+			u64 v;
+
+			if (find_field(cmdline, regs[i], f, &v))
+				continue;
+
+			regs[i]->override->val  &= ~mask;
+			regs[i]->override->val  |= (v << shift) & mask;
+			regs[i]->override->mask |= mask;
+
+			return;
+		}
+	}
+}
+
+static __init void __parse_cmdline(const char *cmdline, bool parse_aliases)
+{
+	do {
+		char buf[256];
+		size_t len;
+		int i;
+
+		cmdline = skip_spaces(cmdline);
+
+		for (len = 0; cmdline[len] && !isspace(cmdline[len]); len++);
+		if (!len)
+			return;
+
+		len = min(len, ARRAY_SIZE(buf) - 1);
+		strncpy(buf, cmdline, len);
+		buf[len] = 0;
+
+		if (strcmp(buf, "--") == 0)
+			return;
+
+		cmdline += len;
+
+		match_options(buf);
+
+		for (i = 0; parse_aliases && i < ARRAY_SIZE(aliases); i++)
+			if (parameq(buf, aliases[i].alias))
+				__parse_cmdline(aliases[i].feature, false);
+	} while (1);
+}
+
+static __init void parse_cmdline(void)
+{
+	if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
+		const u8 *prop;
+		void *fdt;
+		int node;
+
+		fdt = get_early_fdt_ptr();
+		if (!fdt)
+			goto out;
+
+		node = fdt_path_offset(fdt, "/chosen");
+		if (node < 0)
+			goto out;
+
+		prop = fdt_getprop(fdt, node, "bootargs", NULL);
+		if (!prop)
+			goto out;
+
+		__parse_cmdline(prop, true);
+
+		if (!IS_ENABLED(CONFIG_CMDLINE_EXTEND))
+			return;
+	}
+
+out:
+	__parse_cmdline(CONFIG_CMDLINE, true);
+}
+
+/* Keep checkers quiet */
+void init_feature_override(void);
+
+asmlinkage void __init init_feature_override(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(regs); i++) {
+		if (regs[i]->override) {
+			regs[i]->override->val  = 0;
+			regs[i]->override->mask = 0;
+		}
+	}
+
+	parse_cmdline();
+
+	for (i = 0; i < ARRAY_SIZE(regs); i++) {
+		if (regs[i]->override)
+			__flush_dcache_area(regs[i]->override,
+					    sizeof(*regs[i]->override));
+	}
+}
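For each "<reg>.<field>=<value>" token, match_options() above folds the 4-bit field
into the register's {val, mask} pair. The same arithmetic in isolation (standalone C,
hypothetical names, shown only to make the composition concrete):

    #include <stdint.h>
    #include <stdio.h>

    struct ftr_override { uint64_t val, mask; };

    /* Record "4-bit field at 'shift' is forced to 'v'", as match_options() does. */
    static void set_field(struct ftr_override *o, unsigned int shift, uint64_t v)
    {
    	uint64_t mask = 0xfULL << shift;

    	o->val  &= ~mask;
    	o->val  |= (v << shift) & mask;
    	o->mask |= mask;
    }

    int main(void)
    {
    	struct ftr_override ovr = { 0, 0 };

    	set_field(&ovr, 8, 0); /* e.g. id_aa64mmfr1.vh=0; VH is bits [11:8] */
    	printf("val=%#llx mask=%#llx\n",
    	       (unsigned long long)ovr.val, (unsigned long long)ovr.mask);
    	return 0;
    }

This prints val=0 mask=0xf00, which is exactly the pair the boot code consumes.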
@@ -19,6 +19,7 @@
 #include <asm/memory.h>
 #include <asm/mmu.h>
 #include <asm/sections.h>
+#include <asm/setup.h>
 
 enum kaslr_status {
 	KASLR_ENABLED,
 
@@ -50,39 +51,7 @@ static __init u64 get_kaslr_seed(void *fdt)
 	return ret;
 }
 
-static __init bool cmdline_contains_nokaslr(const u8 *cmdline)
-{
-	const u8 *str;
-
-	str = strstr(cmdline, "nokaslr");
-	return str == cmdline || (str > cmdline && *(str - 1) == ' ');
-}
-
-static __init bool is_kaslr_disabled_cmdline(void *fdt)
-{
-	if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
-		int node;
-		const u8 *prop;
-
-		node = fdt_path_offset(fdt, "/chosen");
-		if (node < 0)
-			goto out;
-
-		prop = fdt_getprop(fdt, node, "bootargs", NULL);
-		if (!prop)
-			goto out;
-
-		if (cmdline_contains_nokaslr(prop))
-			return true;
-
-		if (IS_ENABLED(CONFIG_CMDLINE_EXTEND))
-			goto out;
-
-		return false;
-	}
-out:
-	return cmdline_contains_nokaslr(CONFIG_CMDLINE);
-}
+struct arm64_ftr_override kaslr_feature_override __initdata;
 
 /*
  * This routine will be executed with the kernel mapped at its default virtual
 
@@ -92,12 +61,11 @@ out:
  * containing function pointers) to be reinitialized, and zero-initialized
  * .bss variables will be reset to 0.
  */
-u64 __init kaslr_early_init(u64 dt_phys)
+u64 __init kaslr_early_init(void)
 {
 	void *fdt;
 	u64 seed, offset, mask, module_range;
 	unsigned long raw;
-	int size;
 
 	/*
 	 * Set a reasonable default for module_alloc_base in case
 
@@ -111,8 +79,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
 	 * and proceed with KASLR disabled. We will make another
 	 * attempt at mapping the FDT in setup_machine()
 	 */
-	early_fixmap_init();
-	fdt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
+	fdt = get_early_fdt_ptr();
 	if (!fdt) {
 		kaslr_status = KASLR_DISABLED_FDT_REMAP;
 		return 0;
 
@@ -127,7 +94,7 @@
 	 * Check if 'nokaslr' appears on the command line, and
 	 * return 0 if that is the case.
	 */
-	if (is_kaslr_disabled_cmdline(fdt)) {
+	if (kaslr_feature_override.val & kaslr_feature_override.mask & 0xf) {
 		kaslr_status = KASLR_DISABLED_CMDLINE;
 		return 0;
 	}
 
@@ -168,6 +168,21 @@ static void __init smp_build_mpidr_hash(void)
 		pr_warn("Large number of MPIDR hash buckets detected\n");
 }
 
+static void *early_fdt_ptr __initdata;
+
+void __init *get_early_fdt_ptr(void)
+{
+	return early_fdt_ptr;
+}
+
+asmlinkage void __init early_fdt_map(u64 dt_phys)
+{
+	int fdt_size;
+
+	early_fixmap_init();
+	early_fdt_ptr = fixmap_remap_fdt(dt_phys, &fdt_size, PAGE_KERNEL);
+}
+
 static void __init setup_machine_fdt(phys_addr_t dt_phys)
 {
 	int size;
 
@@ -100,6 +100,7 @@ SYM_FUNC_END(__cpu_suspend_enter)
 	.pushsection ".idmap.text", "awx"
SYM_CODE_START(cpu_resume)
 	bl	init_kernel_el
+	bl	switch_to_vhe
 	bl	__cpu_setup
 	/* enable the MMU early - so we can access sleep_save_stash by va */
 	adrp	x1, swapper_pg_dir
 
@@ -1966,6 +1966,9 @@ static int __init early_kvm_mode_cfg(char *arg)
 		return 0;
 	}
 
+	if (strcmp(arg, "nvhe") == 0 && !WARN_ON(is_kernel_in_hyp_mode()))
+		return 0;
+
 	return -EINVAL;
 }
 early_param("kvm-arm.mode", early_kvm_mode_cfg);
 
@@ -189,7 +189,7 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
 2:	msr	SPsel, #1			// We want to use SP_EL{1,2}
 
 	/* Initialize EL2 CPU state to sane values. */
-	init_el2_state nvhe			// Clobbers x0..x2
+	init_el2_state				// Clobbers x0..x2
 
 	/* Enable MMU, set vectors and stack. */
 	mov	x0, x28
 
@@ -628,7 +628,7 @@ static bool arm64_early_this_cpu_has_bti(void)
 	if (!IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
 		return false;
 
-	pfr1 = read_sysreg_s(SYS_ID_AA64PFR1_EL1);
+	pfr1 = __read_sysreg_by_encoding(SYS_ID_AA64PFR1_EL1);
 	return cpuid_feature_extract_unsigned_field(pfr1,
 						    ID_AA64PFR1_BT_SHIFT);
 }
 
@@ -291,17 +291,7 @@ skip_pgd:
 	/* We're done: fire up the MMU again */
 	mrs	x17, sctlr_el1
 	orr	x17, x17, #SCTLR_ELx_M
-	msr	sctlr_el1, x17
-	isb
-
-	/*
-	 * Invalidate the local I-cache so that any instructions fetched
-	 * speculatively from the PoC are discarded, since they may have
-	 * been dynamically patched at the PoU.
-	 */
-	ic	iallu
-	dsb	nsh
-	isb
+	set_sctlr_el1	x17
 
 	/* Set the flag to zero to indicate that we're all done */
 	str	wzr, [flag_ptr]
 
@@ -464,8 +454,8 @@ SYM_FUNC_START(__cpu_setup)
 #endif
 	msr	mair_el1, x5
 	/*
-	 * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
-	 * both user and kernel.
+	 * Set/prepare TCR and TTBR. TCR_EL1.T1SZ gets further
+	 * adjusted if the kernel is compiled with 52bit VA support.
	 */
 	mov_q	x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
 			TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \