KVM/arm64 fixes for Linux 5.8, take #1
* 32bit VM fixes:
  - Fix embarrassing mapping issue between AArch32 CSSELR and AArch64 ACTLR
  - Add ACTLR2 support for AArch32
  - Get rid of the useless ACTLR_EL1 save/restore
  - Fix CP14/15 accesses for AArch32 guests on BE hosts
  - Ensure that we don't lose any state when injecting a 32bit exception
    when running on a VHE host

* 64bit VM fixes:
  - Fix PtrAuth host saving happening in preemptible contexts
  - Optimize PtrAuth lazy enable
  - Drop vcpu to cpu context pointer
  - Fix sparse warnings for HYP per-CPU accesses

-----BEGIN PGP SIGNATURE-----

iQJDBAABCgAtFiEEn9UcU+C1Yxj9lZw9I9DQutE9ekMFAl7h6r8PHG1hekBrZXJu
ZWwub3JnAAoJECPQ0LrRPXpDE3gP/iogqGjZasUIwk4gdIc4IaxxNsfTYJFIh5uw
sedAqwCQg3OftX0jptp6GhI3ZIG5UPuGDM7f3aio6i02pjx6bfBxGJ9AXqNcp6gN
WcECHsAfzHUScznRhBbVflKkOF4dzfzyiutnMdknihePOyO9drwdvzXuJa37cs52
tsCneP9xQ/vQWdqu42uPS7HtSepSa/Lf/qeKGaTDWQIvNYGI3PctQvRAxx4FNHc/
SMUpS5zdTFceVoya/2+azTJ24R1lbwlPwaw2WoaghB+QmREKN8uMKy5kjrO5YUnH
8BtjESiNBI2CZYSwcxFt+QNA6EmymwDwfrmOE+7iBCZelOLWLVYbJ7icKX3kT731
gts5PBD8JlZWAnbH/Mbo4qngXJwHaijA38Bt8rvSphI0aK6iOU6DP5BuOurzNRde
XczDYq3lqdCC2ynROjRpH4paVo7s0sBjjgZ7OsWqsw9uRAogwTkVE2sEi4HdqNAH
JHhIHEKj7t/bRtzneXVk6ngoezIs6sIdcqrUZ+rAMnmMHbrzBoEqnlrlQ7e2/UXY
yvY5Yc3/H2pKRCK/KznOi1nVG+xUZp4RZp552pwULF+JVbmMHIOxn3IxiejfMZVx
czD5cxMcgMWa14ZZRN0DynT9wCg+s+MGaKGR6STyudVYHFBTr7hrsuM1zq/neMQf
JcUBVUot
=I2Li
-----END PGP SIGNATURE-----

Merge tag 'kvmarm-fixes-5.8-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for Linux 5.8, take #1

* 32bit VM fixes:
  - Fix embarrassing mapping issue between AArch32 CSSELR and AArch64 ACTLR
  - Add ACTLR2 support for AArch32
  - Get rid of the useless ACTLR_EL1 save/restore
  - Fix CP14/15 accesses for AArch32 guests on BE hosts
  - Ensure that we don't lose any state when injecting a 32bit exception
    when running on a VHE host

* 64bit VM fixes:
  - Fix PtrAuth host saving happening in preemptible contexts
  - Optimize PtrAuth lazy enable
  - Drop vcpu to cpu context pointer
  - Fix sparse warnings for HYP per-CPU accesses
commit 49b3deaad3
@@ -81,12 +81,39 @@ extern u32 __kvm_get_mdcr_el2(void);
 
 extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 
-/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
+/*
+ * Obtain the PC-relative address of a kernel symbol
+ * s: symbol
+ *
+ * The goal of this macro is to return a symbol's address based on a
+ * PC-relative computation, as opposed to a loading the VA from a
+ * constant pool or something similar. This works well for HYP, as an
+ * absolute VA is guaranteed to be wrong. Only use this if trying to
+ * obtain the address of a symbol (i.e. not something you obtained by
+ * following a pointer).
+ */
+#define hyp_symbol_addr(s) \
+        ({ \
+                typeof(s) *addr; \
+                asm("adrp %0, %1\n" \
+                    "add %0, %0, :lo12:%1\n" \
+                    : "=r" (addr) : "S" (&s)); \
+                addr; \
+        })
+
+/*
+ * Home-grown __this_cpu_{ptr,read} variants that always work at HYP,
+ * provided that sym is really a *symbol* and not a pointer obtained from
+ * a data structure. As for SHIFT_PERCPU_PTR(), the creative casting keeps
+ * sparse quiet.
+ */
 #define __hyp_this_cpu_ptr(sym) \
         ({ \
-                void *__ptr = hyp_symbol_addr(sym); \
+                void *__ptr; \
+                __verify_pcpu_ptr(&sym); \
+                __ptr = hyp_symbol_addr(sym); \
                 __ptr += read_sysreg(tpidr_el2); \
-                (typeof(&sym))__ptr; \
+                (typeof(sym) __kernel __force *)__ptr; \
         })
 
 #define __hyp_this_cpu_read(sym) \
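The accessor above boils down to "PC-relative symbol address + this CPU's offset", with the offset held in TPIDR_EL2 and the final cast present only to keep sparse quiet. Below is a minimal stand-alone C sketch of the same arithmetic; it is illustration only, not kernel code, all names are invented for the example and the offset register is modelled as an ordinary variable.

/*
 * Illustration only, not kernel code: resolve a "per-CPU" variable as the
 * symbol's address plus a per-CPU offset (the kernel keeps that offset in
 * TPIDR_EL2 and forms the symbol address PC-relatively with adrp/add).
 */
#include <stdio.h>

static long demo_percpu_counter;        /* stand-in for a per-CPU symbol */
static unsigned long demo_cpu_offset;   /* stand-in for TPIDR_EL2 */

#define demo_this_cpu_ptr(sym) \
        ((typeof(sym) *)((char *)&(sym) + demo_cpu_offset))

int main(void)
{
        demo_cpu_offset = 0;    /* pretend this CPU's per-CPU area is at offset 0 */
        *demo_this_cpu_ptr(demo_percpu_counter) = 42;
        printf("counter = %ld\n", demo_percpu_counter);
        return 0;
}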
@@ -112,12 +112,6 @@ static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
         vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
 }
 
-static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
-{
-        if (vcpu_has_ptrauth(vcpu))
-                vcpu_ptrauth_disable(vcpu);
-}
-
 static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
 {
         return vcpu->arch.vsesr_el2;
@@ -284,9 +284,6 @@ struct kvm_vcpu_arch {
         struct kvm_guest_debug_arch vcpu_debug_state;
         struct kvm_guest_debug_arch external_debug_state;
 
-        /* Pointer to host CPU context */
-        struct kvm_cpu_context *host_cpu_context;
-
         struct thread_info *host_thread_info;  /* hyp VA */
         struct user_fpsimd_state *host_fpsimd_state;   /* hyp VA */
 
@@ -404,8 +401,10 @@ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
  * CP14 and CP15 live in the same array, as they are backed by the
  * same system registers.
  */
-#define vcpu_cp14(v,r)  ((v)->arch.ctxt.copro[(r)])
-#define vcpu_cp15(v,r)  ((v)->arch.ctxt.copro[(r)])
+#define CPx_BIAS        IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
+
+#define vcpu_cp14(v,r)  ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
+#define vcpu_cp15(v,r)  ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
 
 struct kvm_vm_stat {
         ulong remote_tlb_flush;
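The XOR with CPx_BIAS above works because each 64-bit entry of the sysreg array overlays two 32-bit coprocessor words, and on a big-endian kernel the low 32 bits live in the second word rather than the first. A stand-alone sketch of that index adjustment follows; it is illustration only, not kernel code, and the bias is derived at run time here instead of from CONFIG_CPU_BIG_ENDIAN.

#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned long long sysreg = 0x1111111122222222ULL; /* high:0x11..., low:0x22... */
        unsigned int words[2];
        int bias, lo_index;

        memcpy(words, &sysreg, sizeof(sysreg));
        bias = (words[0] == 0x11111111u);  /* 1 on a big-endian host, 0 on little-endian */
        lo_index = 0 ^ bias;               /* word index holding the low 32 bits */
        printf("low half is word %d: %#x\n", lo_index, words[lo_index]);
        return 0;
}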
@@ -107,26 +107,6 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 
 #define kern_hyp_va(v)  ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
 
-/*
- * Obtain the PC-relative address of a kernel symbol
- * s: symbol
- *
- * The goal of this macro is to return a symbol's address based on a
- * PC-relative computation, as opposed to a loading the VA from a
- * constant pool or something similar. This works well for HYP, as an
- * absolute VA is guaranteed to be wrong. Only use this if trying to
- * obtain the address of a symbol (i.e. not something you obtained by
- * following a pointer).
- */
-#define hyp_symbol_addr(s) \
-        ({ \
-                typeof(s) *addr; \
-                asm("adrp %0, %1\n" \
-                    "add %0, %0, :lo12:%1\n" \
-                    : "=r" (addr) : "S" (&s)); \
-                addr; \
-        })
-
 /*
  * We currently support using a VM-specified IPA size. For backward
  * compatibility, the default IPA size is fixed to 40bits.
@@ -33,6 +33,26 @@ static const u8 return_offsets[8][2] = {
         [7] = { 4, 4 },         /* FIQ, unused */
 };
 
+static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
+{
+        preempt_disable();
+        if (vcpu->arch.sysregs_loaded_on_cpu) {
+                kvm_arch_vcpu_put(vcpu);
+                return true;
+        }
+
+        preempt_enable();
+        return false;
+}
+
+static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
+{
+        if (loaded) {
+                kvm_arch_vcpu_load(vcpu, smp_processor_id());
+                preempt_enable();
+        }
+}
+
 /*
  * When an exception is taken, most CPSR fields are left unchanged in the
  * handler. However, some are explicitly overridden (e.g. M[4:0]).
|
|||||||
|
|
||||||
void kvm_inject_undef32(struct kvm_vcpu *vcpu)
|
void kvm_inject_undef32(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
|
bool loaded = pre_fault_synchronize(vcpu);
|
||||||
|
|
||||||
prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
|
prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
|
||||||
|
post_fault_synchronize(vcpu, loaded);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -168,6 +191,9 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
         u32 vect_offset;
         u32 *far, *fsr;
         bool is_lpae;
+        bool loaded;
+
+        loaded = pre_fault_synchronize(vcpu);
 
         if (is_pabt) {
                 vect_offset = 12;
@@ -191,6 +217,8 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
                 /* no need to shuffle FS[4] into DFSR[10] as its 0 */
                 *fsr = DFSR_FSC_EXTABT_nLPAE;
         }
+
+        post_fault_synchronize(vcpu, loaded);
 }
 
 void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
@@ -335,10 +335,8 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
         int *last_ran;
-        kvm_host_data_t *cpu_data;
 
         last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
-        cpu_data = this_cpu_ptr(&kvm_host_data);
 
         /*
          * We might get preempted before the vCPU actually runs, but
@@ -350,7 +348,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
         }
 
         vcpu->cpu = cpu;
-        vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
 
         kvm_vgic_load(vcpu);
         kvm_timer_vcpu_load(vcpu);
@@ -365,7 +362,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
         else
                 vcpu_set_wfx_traps(vcpu);
 
-        vcpu_ptrauth_setup_lazy(vcpu);
+        if (vcpu_has_ptrauth(vcpu))
+                vcpu_ptrauth_disable(vcpu);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -985,11 +983,17 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
          * Ensure a rebooted VM will fault in RAM pages and detect if the
          * guest MMU is turned off and flush the caches as needed.
          *
-         * S2FWB enforces all memory accesses to RAM being cacheable, we
-         * ensure that the cache is always coherent.
+         * S2FWB enforces all memory accesses to RAM being cacheable,
+         * ensuring that the data side is always coherent. We still
+         * need to invalidate the I-cache though, as FWB does *not*
+         * imply CTR_EL0.DIC.
          */
-        if (vcpu->arch.has_run_once && !cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
-                stage2_unmap_vm(vcpu->kvm);
+        if (vcpu->arch.has_run_once) {
+                if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
+                        stage2_unmap_vm(vcpu->kvm);
+                else
+                        __flush_icache_all();
+        }
 
         vcpu_reset_hcr(vcpu);
 
@@ -162,40 +162,14 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
         return 1;
 }
 
-#define __ptrauth_save_key(regs, key) \
-({ \
-        regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
-        regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
-})
-
-/*
- * Handle the guest trying to use a ptrauth instruction, or trying to access a
- * ptrauth register.
- */
-void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
-{
-        struct kvm_cpu_context *ctxt;
-
-        if (vcpu_has_ptrauth(vcpu)) {
-                vcpu_ptrauth_enable(vcpu);
-                ctxt = vcpu->arch.host_cpu_context;
-                __ptrauth_save_key(ctxt->sys_regs, APIA);
-                __ptrauth_save_key(ctxt->sys_regs, APIB);
-                __ptrauth_save_key(ctxt->sys_regs, APDA);
-                __ptrauth_save_key(ctxt->sys_regs, APDB);
-                __ptrauth_save_key(ctxt->sys_regs, APGA);
-        } else {
-                kvm_inject_undefined(vcpu);
-        }
-}
-
 /*
  * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
- * a NOP).
+ * a NOP). If we get here, it is that we didn't fixup ptrauth on exit, and all
+ * that we can do is give the guest an UNDEF.
  */
 static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-        kvm_arm_vcpu_ptrauth_trap(vcpu);
+        kvm_inject_undefined(vcpu);
         return 1;
 }
 
|
@ -185,7 +185,7 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
|
|||||||
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
|
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
|
host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
|
||||||
guest_ctxt = &vcpu->arch.ctxt;
|
guest_ctxt = &vcpu->arch.ctxt;
|
||||||
host_dbg = &vcpu->arch.host_debug_state.regs;
|
host_dbg = &vcpu->arch.host_debug_state.regs;
|
||||||
guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
|
guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
|
||||||
@@ -207,7 +207,7 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
         if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
                 return;
 
-        host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+        host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
         guest_ctxt = &vcpu->arch.ctxt;
         host_dbg = &vcpu->arch.host_debug_state.regs;
         guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
@@ -490,6 +490,64 @@ static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
         return true;
 }
 
+static bool __hyp_text esr_is_ptrauth_trap(u32 esr)
+{
+        u32 ec = ESR_ELx_EC(esr);
+
+        if (ec == ESR_ELx_EC_PAC)
+                return true;
+
+        if (ec != ESR_ELx_EC_SYS64)
+                return false;
+
+        switch (esr_sys64_to_sysreg(esr)) {
+        case SYS_APIAKEYLO_EL1:
+        case SYS_APIAKEYHI_EL1:
+        case SYS_APIBKEYLO_EL1:
+        case SYS_APIBKEYHI_EL1:
+        case SYS_APDAKEYLO_EL1:
+        case SYS_APDAKEYHI_EL1:
+        case SYS_APDBKEYLO_EL1:
+        case SYS_APDBKEYHI_EL1:
+        case SYS_APGAKEYLO_EL1:
+        case SYS_APGAKEYHI_EL1:
+                return true;
+        }
+
+        return false;
+}
+
+#define __ptrauth_save_key(regs, key) \
+({ \
+        regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
+        regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
+})
+
+static bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
+{
+        struct kvm_cpu_context *ctxt;
+        u64 val;
+
+        if (!vcpu_has_ptrauth(vcpu) ||
+            !esr_is_ptrauth_trap(kvm_vcpu_get_hsr(vcpu)))
+                return false;
+
+        ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+        __ptrauth_save_key(ctxt->sys_regs, APIA);
+        __ptrauth_save_key(ctxt->sys_regs, APIB);
+        __ptrauth_save_key(ctxt->sys_regs, APDA);
+        __ptrauth_save_key(ctxt->sys_regs, APDB);
+        __ptrauth_save_key(ctxt->sys_regs, APGA);
+
+        vcpu_ptrauth_enable(vcpu);
+
+        val = read_sysreg(hcr_el2);
+        val |= (HCR_API | HCR_APK);
+        write_sysreg(val, hcr_el2);
+
+        return true;
+}
+
 /*
  * Return true when we were able to fixup the guest exit and should return to
  * the guest, false when we should restore the host state and return to the
@@ -524,6 +582,9 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
         if (__hyp_handle_fpsimd(vcpu))
                 return true;
 
+        if (__hyp_handle_ptrauth(vcpu))
+                return true;
+
         if (!__populate_fault_info(vcpu))
                 return true;
 
@@ -642,7 +703,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
         struct kvm_cpu_context *guest_ctxt;
         u64 exit_code;
 
-        host_ctxt = vcpu->arch.host_cpu_context;
+        host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
         host_ctxt->__hyp_running_vcpu = vcpu;
         guest_ctxt = &vcpu->arch.ctxt;
 
@@ -747,7 +808,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
 
         vcpu = kern_hyp_va(vcpu);
 
-        host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+        host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
         host_ctxt->__hyp_running_vcpu = vcpu;
         guest_ctxt = &vcpu->arch.ctxt;
 
@@ -39,7 +39,6 @@ static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
 {
         ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
         ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(SYS_SCTLR);
-        ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
         ctxt->sys_regs[CPACR_EL1] = read_sysreg_el1(SYS_CPACR);
         ctxt->sys_regs[TTBR0_EL1] = read_sysreg_el1(SYS_TTBR0);
         ctxt->sys_regs[TTBR1_EL1] = read_sysreg_el1(SYS_TTBR1);
@@ -123,7 +122,6 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
                 isb();
         }
 
-        write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
         write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], SYS_CPACR);
         write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], SYS_TTBR0);
         write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], SYS_TTBR1);
@@ -267,12 +265,13 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
  */
 void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
 {
-        struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
         struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
+        struct kvm_cpu_context *host_ctxt;
 
         if (!has_vhe())
                 return;
 
+        host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
         __sysreg_save_user_state(host_ctxt);
 
         /*
@@ -303,12 +302,13 @@ void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
  */
 void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
 {
-        struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
         struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
+        struct kvm_cpu_context *host_ctxt;
 
         if (!has_vhe())
                 return;
 
+        host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
         deactivate_traps_vhe_put();
 
         __sysreg_save_el1_state(guest_ctxt);
@@ -163,15 +163,13 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
  */
 void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
 {
-        struct kvm_cpu_context *host_ctxt;
         struct kvm_host_data *host;
         u32 events_guest, events_host;
 
         if (!has_vhe())
                 return;
 
-        host_ctxt = vcpu->arch.host_cpu_context;
-        host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+        host = this_cpu_ptr(&kvm_host_data);
         events_guest = host->pmu_events.events_guest;
         events_host = host->pmu_events.events_host;
 
@@ -184,15 +182,13 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
  */
 void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
 {
-        struct kvm_cpu_context *host_ctxt;
         struct kvm_host_data *host;
         u32 events_guest, events_host;
 
         if (!has_vhe())
                 return;
 
-        host_ctxt = vcpu->arch.host_cpu_context;
-        host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+        host = this_cpu_ptr(&kvm_host_data);
         events_guest = host->pmu_events.events_guest;
         events_host = host->pmu_events.events_host;
 
|
@ -78,7 +78,6 @@ static bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
|
|||||||
switch (reg) {
|
switch (reg) {
|
||||||
case CSSELR_EL1: *val = read_sysreg_s(SYS_CSSELR_EL1); break;
|
case CSSELR_EL1: *val = read_sysreg_s(SYS_CSSELR_EL1); break;
|
||||||
case SCTLR_EL1: *val = read_sysreg_s(SYS_SCTLR_EL12); break;
|
case SCTLR_EL1: *val = read_sysreg_s(SYS_SCTLR_EL12); break;
|
||||||
case ACTLR_EL1: *val = read_sysreg_s(SYS_ACTLR_EL1); break;
|
|
||||||
case CPACR_EL1: *val = read_sysreg_s(SYS_CPACR_EL12); break;
|
case CPACR_EL1: *val = read_sysreg_s(SYS_CPACR_EL12); break;
|
||||||
case TTBR0_EL1: *val = read_sysreg_s(SYS_TTBR0_EL12); break;
|
case TTBR0_EL1: *val = read_sysreg_s(SYS_TTBR0_EL12); break;
|
||||||
case TTBR1_EL1: *val = read_sysreg_s(SYS_TTBR1_EL12); break;
|
case TTBR1_EL1: *val = read_sysreg_s(SYS_TTBR1_EL12); break;
|
||||||
@@ -118,7 +117,6 @@ static bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
         switch (reg) {
         case CSSELR_EL1: write_sysreg_s(val, SYS_CSSELR_EL1); break;
         case SCTLR_EL1:  write_sysreg_s(val, SYS_SCTLR_EL12); break;
-        case ACTLR_EL1:  write_sysreg_s(val, SYS_ACTLR_EL1);  break;
         case CPACR_EL1:  write_sysreg_s(val, SYS_CPACR_EL12); break;
         case TTBR0_EL1:  write_sysreg_s(val, SYS_TTBR0_EL12); break;
         case TTBR1_EL1:  write_sysreg_s(val, SYS_TTBR1_EL12); break;
@@ -1034,16 +1032,13 @@ static bool trap_ptrauth(struct kvm_vcpu *vcpu,
                          struct sys_reg_params *p,
                          const struct sys_reg_desc *rd)
 {
-        kvm_arm_vcpu_ptrauth_trap(vcpu);
-
         /*
-         * Return false for both cases as we never skip the trapped
-         * instruction:
-         *
-         * - Either we re-execute the same key register access instruction
-         *   after enabling ptrauth.
-         * - Or an UNDEF is injected as ptrauth is not supported/enabled.
+         * If we land here, that is because we didn't fixup the access on exit
+         * by allowing the PtrAuth sysregs. The only way this happens is when
+         * the guest does not have PtrAuth support enabled.
          */
+        kvm_inject_undefined(vcpu);
+
         return false;
 }
 
@@ -1319,10 +1314,16 @@ static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
 {
+        int reg = r->reg;
+
+        /* See the 32bit mapping in kvm_host.h */
+        if (p->is_aarch32)
+                reg = r->reg / 2;
+
         if (p->is_write)
-                vcpu_write_sys_reg(vcpu, p->regval, r->reg);
+                vcpu_write_sys_reg(vcpu, p->regval, reg);
         else
-                p->regval = vcpu_read_sys_reg(vcpu, r->reg);
+                p->regval = vcpu_read_sys_reg(vcpu, reg);
         return true;
 }
 
@@ -27,6 +27,14 @@ static bool access_actlr(struct kvm_vcpu *vcpu,
                 return ignore_write(vcpu, p);
 
         p->regval = vcpu_read_sys_reg(vcpu, ACTLR_EL1);
+
+        if (p->is_aarch32) {
+                if (r->Op2 & 2)
+                        p->regval = upper_32_bits(p->regval);
+                else
+                        p->regval = lower_32_bits(p->regval);
+        }
+
         return true;
 }
 
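The access_actlr() change above gives the AArch32 ACTLR/ACTLR2 pair the low and high halves of the 64-bit ACTLR_EL1 value, selected on bit 1 of Op2. A small sketch of that split follows; it is illustration only, not kernel code, and plain shifts stand in for the kernel's lower_32_bits()/upper_32_bits() helpers.

#include <stdio.h>
#include <stdint.h>

static uint32_t actlr_view(uint64_t actlr_el1, unsigned int op2)
{
        /* Op2 bit 1 set (ACTLR2) selects the upper half, otherwise the lower half */
        return (op2 & 2) ? (uint32_t)(actlr_el1 >> 32) : (uint32_t)actlr_el1;
}

int main(void)
{
        uint64_t actlr_el1 = 0xaabbccdd11223344ULL;     /* made-up register value */

        printf("ACTLR  (Op2=0b001): %#x\n", actlr_view(actlr_el1, 1));
        printf("ACTLR2 (Op2=0b011): %#x\n", actlr_view(actlr_el1, 3));
        return 0;
}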
@@ -47,6 +55,8 @@ static const struct sys_reg_desc genericv8_cp15_regs[] = {
         /* ACTLR */
         { Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b001),
           access_actlr },
+        { Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b011),
+          access_actlr },
 };
 
 static struct kvm_sys_reg_target_table genericv8_target_table = {