Mirror of https://github.com/torvalds/linux.git (synced 2024-11-24 21:21:41 +00:00)
commit 1c5a0b55ab
Merge tag 'kvmarm-6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 changes for 6.11

- Initial infrastructure for shadow stage-2 MMUs, as part of nested
  virtualization enablement

- Support for userspace changes to the guest CTR_EL0 value, enabling
  (in part) migration of VMs between heterogeneous hardware

- Fixes + improvements to pKVM's FF-A proxy, adding support for v1.1 of
  the protocol

- FPSIMD/SVE support for nested, including merged trap configuration
  and exception routing

- New command-line parameter to control the WFx trap behavior under KVM

- Introduce kCFI hardening in the EL2 hypervisor

- Fixes + cleanups for handling presence/absence of FEAT_TCRX

- Miscellaneous fixes + documentation updates

-----BEGIN PGP SIGNATURE-----

iI0EABYIADUWIQSNXHjWXuzMZutrKNKivnWIJHzdFgUCZpTCAxccb2xpdmVyLnVw
dG9uQGxpbnV4LmRldgAKCRCivnWIJHzdFjChAQCWs9ucJag4USgvXpg5mo9sxzly
kBZZ1o49N/VLxs4cagEAtq3KVNQNQyGXelYH6gr20aI85j6VnZW5W5z+sy5TAgk=
=sSOt
-----END PGP SIGNATURE-----
@@ -2720,6 +2720,24 @@
			[KVM,ARM,EARLY] Allow use of GICv4 for direct
			injection of LPIs.

	kvm-arm.wfe_trap_policy=
			[KVM,ARM] Control when to set WFE instruction trap for
			KVM VMs. Traps are allowed but not guaranteed by the
			CPU architecture.

			trap: set WFE instruction trap

			notrap: clear WFE instruction trap

	kvm-arm.wfi_trap_policy=
			[KVM,ARM] Control when to set WFI instruction trap for
			KVM VMs. Traps are allowed but not guaranteed by the
			CPU architecture.

			trap: set WFI instruction trap

			notrap: clear WFI instruction trap

	kvm_cma_resv_ratio=n	[PPC,EARLY]
			Reserves given percentage from system memory area for
			contiguous memory allocation for KVM hash pagetable
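As a usage sketch for the two parameters documented above (the values shown are illustrative; whether a trap actually takes effect still depends on what the CPU architecture allows):

	# Example kernel command line: always trap WFI, never trap WFE
	kvm-arm.wfi_trap_policy=trap kvm-arm.wfe_trap_policy=notrap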
@@ -891,12 +891,12 @@ like this::

The irq_type field has the following values:

- irq_type[0]:
- KVM_ARM_IRQ_TYPE_CPU:
		out-of-kernel GIC: irq_id 0 is IRQ, irq_id 1 is FIQ
- irq_type[1]:
- KVM_ARM_IRQ_TYPE_SPI:
		in-kernel GIC: SPI, irq_id between 32 and 1019 (incl.)
		(the vcpu_index field is ignored)
- irq_type[2]:
- KVM_ARM_IRQ_TYPE_PPI:
		in-kernel GIC: PPI, irq_id between 16 and 31 (incl.)

(The irq_id field thus corresponds nicely to the IRQ ID in the ARM GIC specs)
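A minimal userspace sketch of how these fields are combined into the irq value passed to KVM_IRQ_LINE follows; the helper names and the hard-coded shifts (irq_type in bits [27:24], vcpu_index in bits [23:16], irq_id in bits [15:0], per the KVM_IRQ_LINE documentation) are illustrative assumptions, not part of this diff.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative helper: pack the arm64 KVM_IRQ_LINE 'irq' field. */
static uint32_t arm_irq_line(uint32_t irq_type, uint32_t vcpu_index, uint32_t irq_id)
{
	/* assumed layout: irq_type[27:24] | vcpu_index[23:16] | irq_id[15:0] */
	return (irq_type << 24) | ((vcpu_index & 0xff) << 16) | (irq_id & 0xffff);
}

/* Raise SPI 32 on the in-kernel GIC (vcpu_index is ignored for SPIs): */
static int assert_spi32(int vm_fd)
{
	struct kvm_irq_level irq = {
		.irq	= arm_irq_line(KVM_ARM_IRQ_TYPE_SPI, 0, 32),
		.level	= 1,
	};

	return ioctl(vm_fd, KVM_IRQ_LINE, &irq);
}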
@@ -1927,7 +1927,7 @@ flags:

If KVM_MSI_VALID_DEVID is set, devid contains a unique device identifier
for the device that wrote the MSI message. For PCI, this is usually a
BFD identifier in the lower 16 bits.
BDF identifier in the lower 16 bits.

On x86, address_hi is ignored unless the KVM_X2APIC_API_USE_32BIT_IDS
feature of KVM_CAP_X2APIC_API capability is enabled. If it is enabled,
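For reference on the corrected acronym: a PCI BDF (bus/device/function) identifier packs the bus into bits [15:8], the device into bits [7:3] and the function into bits [2:0]. A minimal sketch (the helper name is illustrative):

#include <stdint.h>

/* Illustrative: encode a PCI bus/device/function triple into a 16-bit BDF. */
static uint16_t pci_bdf(uint8_t bus, uint8_t dev, uint8_t fn)
{
	return (uint16_t)((bus << 8) | ((dev & 0x1f) << 3) | (fn & 0x7));
}
/* e.g. device 00:1f.3 -> pci_bdf(0x00, 0x1f, 0x3) == 0x00fb */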
@@ -2992,7 +2992,7 @@ flags:

If KVM_MSI_VALID_DEVID is set, devid contains a unique device identifier
for the device that wrote the MSI message. For PCI, this is usually a
BFD identifier in the lower 16 bits.
BDF identifier in the lower 16 bits.

On x86, address_hi is ignored unless the KVM_X2APIC_API_USE_32BIT_IDS
feature of KVM_CAP_X2APIC_API capability is enabled. If it is enabled,
@@ -31,7 +31,7 @@ Groups:
  KVM_VGIC_V2_ADDR_TYPE_CPU (rw, 64-bit)
    Base address in the guest physical address space of the GIC virtual cpu
    interface register mappings. Only valid for KVM_DEV_TYPE_ARM_VGIC_V2.
    This address needs to be 4K aligned and the region covers 4 KByte.
    This address needs to be 4K aligned and the region covers 8 KByte.

  Errors:
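As a sketch of how userspace programs this attribute (assuming a GICv2 device fd obtained via KVM_CREATE_DEVICE; the base address chosen here is purely illustrative):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative: set the GICv2 virtual CPU interface base for the guest. */
static int set_vgic_v2_cpu_base(int vgic_device_fd)
{
	uint64_t base = 0x08030000;		/* 4K aligned, example only */
	struct kvm_device_attr attr = {
		.group	= KVM_DEV_ARM_VGIC_GRP_ADDR,
		.attr	= KVM_VGIC_V2_ADDR_TYPE_CPU,
		.addr	= (uint64_t)&base,	/* pointer to the base address */
	};

	return ioctl(vgic_device_fd, KVM_SET_DEVICE_ATTR, &attr);
}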
@@ -12077,6 +12077,8 @@ L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L:	kvmarm@lists.linux.dev
S:	Maintained
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git
F:	Documentation/virt/kvm/arm/
F:	Documentation/virt/kvm/devices/arm*
F:	arch/arm64/include/asm/kvm*
F:	arch/arm64/include/uapi/asm/kvm*
F:	arch/arm64/kvm/
@@ -152,6 +152,7 @@
#define ESR_ELx_Xs_MASK		(GENMASK_ULL(4, 0))

/* ISS field definitions for exceptions taken in to Hyp */
#define ESR_ELx_FSC_ADDRSZ	(0x00)
#define ESR_ELx_CV		(UL(1) << 24)
#define ESR_ELx_COND_SHIFT	(20)
#define ESR_ELx_COND_MASK	(UL(0xF) << ESR_ELx_COND_SHIFT)
@@ -379,6 +380,11 @@
#ifndef __ASSEMBLY__
#include <asm/types.h>

static inline unsigned long esr_brk_comment(unsigned long esr)
{
	return esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
}

static inline bool esr_is_data_abort(unsigned long esr)
{
	const unsigned long ec = ESR_ELx_EC(esr);
@@ -386,6 +392,12 @@ static inline bool esr_is_data_abort(unsigned long esr)
	return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR;
}

static inline bool esr_is_cfi_brk(unsigned long esr)
{
	return ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
	       (esr_brk_comment(esr) & ~CFI_BRK_IMM_MASK) == CFI_BRK_IMM_BASE;
}

static inline bool esr_fsc_is_translation_fault(unsigned long esr)
{
	/* Translation fault, level -1 */
@@ -102,7 +102,6 @@
#define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)

#define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME | HCRX_EL2_TCR2En)
#define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)

/* TCR_EL2 Registers bits */
@@ -232,6 +232,8 @@ extern void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
					phys_addr_t start, unsigned long pages);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);

extern int __kvm_tlbi_s1e2(struct kvm_s2_mmu *mmu, u64 va, u64 sys_encoding);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
@ -11,6 +11,7 @@
|
||||
#ifndef __ARM64_KVM_EMULATE_H__
|
||||
#define __ARM64_KVM_EMULATE_H__
|
||||
|
||||
#include <linux/bitfield.h>
|
||||
#include <linux/kvm_host.h>
|
||||
|
||||
#include <asm/debug-monitors.h>
|
||||
@ -55,6 +56,14 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
|
||||
int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
|
||||
int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);
|
||||
|
||||
static inline void kvm_inject_nested_sve_trap(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u64 esr = FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_SVE) |
|
||||
ESR_ELx_IL;
|
||||
|
||||
kvm_inject_nested_sync(vcpu, esr);
|
||||
}
|
||||
|
||||
#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
|
||||
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
@ -69,39 +78,17 @@ static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
|
||||
|
||||
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
|
||||
if (has_vhe() || has_hvhe())
|
||||
vcpu->arch.hcr_el2 |= HCR_E2H;
|
||||
if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
|
||||
/* route synchronous external abort exceptions to EL2 */
|
||||
vcpu->arch.hcr_el2 |= HCR_TEA;
|
||||
/* trap error record accesses */
|
||||
vcpu->arch.hcr_el2 |= HCR_TERR;
|
||||
}
|
||||
if (!vcpu_has_run_once(vcpu))
|
||||
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
|
||||
|
||||
if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) {
|
||||
vcpu->arch.hcr_el2 |= HCR_FWB;
|
||||
} else {
|
||||
/*
|
||||
* For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
|
||||
* get set in SCTLR_EL1 such that we can detect when the guest
|
||||
* MMU gets turned on and do the necessary cache maintenance
|
||||
* then.
|
||||
*/
|
||||
/*
|
||||
* For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
|
||||
* get set in SCTLR_EL1 such that we can detect when the guest
|
||||
* MMU gets turned on and do the necessary cache maintenance
|
||||
* then.
|
||||
*/
|
||||
if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
|
||||
vcpu->arch.hcr_el2 |= HCR_TVM;
|
||||
}
|
||||
|
||||
if (cpus_have_final_cap(ARM64_HAS_EVT) &&
|
||||
!cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
|
||||
vcpu->arch.hcr_el2 |= HCR_TID4;
|
||||
else
|
||||
vcpu->arch.hcr_el2 |= HCR_TID2;
|
||||
|
||||
if (vcpu_el1_is_32bit(vcpu))
|
||||
vcpu->arch.hcr_el2 &= ~HCR_RW;
|
||||
|
||||
if (kvm_has_mte(vcpu->kvm))
|
||||
vcpu->arch.hcr_el2 |= HCR_ATA;
|
||||
}
|
||||
|
||||
static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
|
||||
@ -660,4 +647,50 @@ static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
|
||||
|
||||
kvm_write_cptr_el2(val);
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE
|
||||
* format if E2H isn't set.
|
||||
*/
|
||||
static inline u64 vcpu_sanitised_cptr_el2(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u64 cptr = __vcpu_sys_reg(vcpu, CPTR_EL2);
|
||||
|
||||
if (!vcpu_el2_e2h_is_set(vcpu))
|
||||
cptr = translate_cptr_el2_to_cpacr_el1(cptr);
|
||||
|
||||
return cptr;
|
||||
}
|
||||
|
||||
static inline bool ____cptr_xen_trap_enabled(const struct kvm_vcpu *vcpu,
|
||||
unsigned int xen)
|
||||
{
|
||||
switch (xen) {
|
||||
case 0b00:
|
||||
case 0b10:
|
||||
return true;
|
||||
case 0b01:
|
||||
return vcpu_el2_tge_is_set(vcpu) && !vcpu_is_el2(vcpu);
|
||||
case 0b11:
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
#define __guest_hyp_cptr_xen_trap_enabled(vcpu, xen) \
|
||||
(!vcpu_has_nv(vcpu) ? false : \
|
||||
____cptr_xen_trap_enabled(vcpu, \
|
||||
SYS_FIELD_GET(CPACR_ELx, xen, \
|
||||
vcpu_sanitised_cptr_el2(vcpu))))
|
||||
|
||||
static inline bool guest_hyp_fpsimd_traps_enabled(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return __guest_hyp_cptr_xen_trap_enabled(vcpu, FPEN);
|
||||
}
|
||||
|
||||
static inline bool guest_hyp_sve_traps_enabled(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return __guest_hyp_cptr_xen_trap_enabled(vcpu, ZEN);
|
||||
}
|
||||
|
||||
#endif /* __ARM64_KVM_EMULATE_H__ */
|
||||
|
@ -189,6 +189,33 @@ struct kvm_s2_mmu {
|
||||
uint64_t split_page_chunk_size;
|
||||
|
||||
struct kvm_arch *arch;
|
||||
|
||||
/*
|
||||
* For a shadow stage-2 MMU, the virtual vttbr used by the
|
||||
* host to parse the guest S2.
|
||||
* This either contains:
|
||||
* - the virtual VTTBR programmed by the guest hypervisor with
|
||||
* CnP cleared
|
||||
* - The value 1 (VMID=0, BADDR=0, CnP=1) if invalid
|
||||
*
|
||||
* We also cache the full VTCR which gets used for TLB invalidation,
|
||||
* taking the ARM ARM's "Any of the bits in VTCR_EL2 are permitted
|
||||
* to be cached in a TLB" to the letter.
|
||||
*/
|
||||
u64 tlb_vttbr;
|
||||
u64 tlb_vtcr;
|
||||
|
||||
/*
|
||||
* true when this represents a nested context where virtual
|
||||
* HCR_EL2.VM == 1
|
||||
*/
|
||||
bool nested_stage2_enabled;
|
||||
|
||||
/*
|
||||
* 0: Nobody is currently using this, check vttbr for validity
|
||||
* >0: Somebody is actively using this.
|
||||
*/
|
||||
atomic_t refcnt;
|
||||
};
|
||||
|
||||
struct kvm_arch_memory_slot {
|
||||
@ -256,6 +283,14 @@ struct kvm_arch {
|
||||
*/
|
||||
u64 fgu[__NR_FGT_GROUP_IDS__];
|
||||
|
||||
/*
|
||||
* Stage 2 paging state for VMs with nested S2 using a virtual
|
||||
* VMID.
|
||||
*/
|
||||
struct kvm_s2_mmu *nested_mmus;
|
||||
size_t nested_mmus_size;
|
||||
int nested_mmus_next;
|
||||
|
||||
/* Interrupt controller */
|
||||
struct vgic_dist vgic;
|
||||
|
||||
@ -327,11 +362,11 @@ struct kvm_arch {
|
||||
* Atomic access to multiple idregs are guarded by kvm_arch.config_lock.
|
||||
*/
|
||||
#define IDREG_IDX(id) (((sys_reg_CRm(id) - 1) << 3) | sys_reg_Op2(id))
|
||||
#define IDX_IDREG(idx) sys_reg(3, 0, 0, ((idx) >> 3) + 1, (idx) & Op2_mask)
|
||||
#define IDREG(kvm, id) ((kvm)->arch.id_regs[IDREG_IDX(id)])
|
||||
#define KVM_ARM_ID_REG_NUM (IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
|
||||
u64 id_regs[KVM_ARM_ID_REG_NUM];
|
||||
|
||||
u64 ctr_el0;
|
||||
|
||||
/* Masks for VNCR-baked sysregs */
|
||||
struct kvm_sysreg_masks *sysreg_masks;
|
||||
|
||||
@ -423,6 +458,7 @@ enum vcpu_sysreg {
|
||||
MDCR_EL2, /* Monitor Debug Configuration Register (EL2) */
|
||||
CPTR_EL2, /* Architectural Feature Trap Register (EL2) */
|
||||
HACR_EL2, /* Hypervisor Auxiliary Control Register */
|
||||
ZCR_EL2, /* SVE Control Register (EL2) */
|
||||
TTBR0_EL2, /* Translation Table Base Register 0 (EL2) */
|
||||
TTBR1_EL2, /* Translation Table Base Register 1 (EL2) */
|
||||
TCR_EL2, /* Translation Control Register (EL2) */
|
||||
@ -867,6 +903,9 @@ struct kvm_vcpu_arch {
|
||||
|
||||
#define vcpu_sve_max_vq(vcpu) sve_vq_from_vl((vcpu)->arch.sve_max_vl)
|
||||
|
||||
#define vcpu_sve_zcr_elx(vcpu) \
|
||||
(unlikely(is_hyp_ctxt(vcpu)) ? ZCR_EL2 : ZCR_EL1)
|
||||
|
||||
#define vcpu_sve_state_size(vcpu) ({ \
|
||||
size_t __size_ret; \
|
||||
unsigned int __vcpu_vq; \
|
||||
@ -991,6 +1030,7 @@ static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
|
||||
case DACR32_EL2: *val = read_sysreg_s(SYS_DACR32_EL2); break;
|
||||
case IFSR32_EL2: *val = read_sysreg_s(SYS_IFSR32_EL2); break;
|
||||
case DBGVCR32_EL2: *val = read_sysreg_s(SYS_DBGVCR32_EL2); break;
|
||||
case ZCR_EL1: *val = read_sysreg_s(SYS_ZCR_EL12); break;
|
||||
default: return false;
|
||||
}
|
||||
|
||||
@ -1036,6 +1076,7 @@ static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
|
||||
case DACR32_EL2: write_sysreg_s(val, SYS_DACR32_EL2); break;
|
||||
case IFSR32_EL2: write_sysreg_s(val, SYS_IFSR32_EL2); break;
|
||||
case DBGVCR32_EL2: write_sysreg_s(val, SYS_DBGVCR32_EL2); break;
|
||||
case ZCR_EL1: write_sysreg_s(val, SYS_ZCR_EL12); break;
|
||||
default: return false;
|
||||
}
|
||||
|
||||
@ -1145,7 +1186,7 @@ int __init populate_nv_trap_config(void);
|
||||
bool lock_all_vcpus(struct kvm *kvm);
|
||||
void unlock_all_vcpus(struct kvm *kvm);
|
||||
|
||||
void kvm_init_sysreg(struct kvm_vcpu *);
|
||||
void kvm_calculate_traps(struct kvm_vcpu *vcpu);
|
||||
|
||||
/* MMIO helpers */
|
||||
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
|
||||
@ -1306,6 +1347,7 @@ void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu);
|
||||
void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu);
|
||||
|
||||
int __init kvm_set_ipa_limit(void);
|
||||
u32 kvm_get_pa_bits(struct kvm *kvm);
|
||||
|
||||
#define __KVM_HAVE_ARCH_VM_ALLOC
|
||||
struct kvm *kvm_arch_alloc_vm(void);
|
||||
@ -1355,6 +1397,24 @@ static inline void kvm_hyp_reserve(void) { }
|
||||
void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
|
||||
bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);
|
||||
|
||||
static inline u64 *__vm_id_reg(struct kvm_arch *ka, u32 reg)
|
||||
{
|
||||
switch (reg) {
|
||||
case sys_reg(3, 0, 0, 1, 0) ... sys_reg(3, 0, 0, 7, 7):
|
||||
return &ka->id_regs[IDREG_IDX(reg)];
|
||||
case SYS_CTR_EL0:
|
||||
return &ka->ctr_el0;
|
||||
default:
|
||||
WARN_ON_ONCE(1);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
#define kvm_read_vm_id_reg(kvm, reg) \
|
||||
({ u64 __val = *__vm_id_reg(&(kvm)->arch, reg); __val; })
|
||||
|
||||
void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
|
||||
|
||||
#define __expand_field_sign_unsigned(id, fld, val) \
|
||||
((u64)SYS_FIELD_VALUE(id, fld, val))
|
||||
|
||||
@ -1371,7 +1431,7 @@ bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);
|
||||
|
||||
#define get_idreg_field_unsigned(kvm, id, fld) \
|
||||
({ \
|
||||
u64 __val = IDREG((kvm), SYS_##id); \
|
||||
u64 __val = kvm_read_vm_id_reg((kvm), SYS_##id); \
|
||||
FIELD_GET(id##_##fld##_MASK, __val); \
|
||||
})
|
||||
|
||||
|
@@ -124,8 +124,8 @@ void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
#endif

#ifdef __KVM_NVHE_HYPERVISOR__
void __pkvm_init_switch_pgd(phys_addr_t phys, unsigned long size,
			    phys_addr_t pgd, void *sp, void *cont_fn);
void __pkvm_init_switch_pgd(phys_addr_t pgd, unsigned long sp,
			    void (*fn)(void));
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
		unsigned long *per_cpu_base, u32 hyp_va_bits);
void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
@ -98,6 +98,7 @@ alternative_cb_end
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/kvm_emulate.h>
|
||||
#include <asm/kvm_host.h>
|
||||
#include <asm/kvm_nested.h>
|
||||
|
||||
void kvm_update_va_mask(struct alt_instr *alt,
|
||||
__le32 *origptr, __le32 *updptr, int nr_inst);
|
||||
@ -165,6 +166,10 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
|
||||
int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr);
|
||||
void __init free_hyp_pgds(void);
|
||||
|
||||
void kvm_stage2_unmap_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size);
|
||||
void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end);
|
||||
void kvm_stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end);
|
||||
|
||||
void stage2_unmap_vm(struct kvm *kvm);
|
||||
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type);
|
||||
void kvm_uninit_stage2_mmu(struct kvm *kvm);
|
||||
@ -326,5 +331,26 @@ static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
|
||||
{
|
||||
return container_of(mmu->arch, struct kvm, arch);
|
||||
}
|
||||
|
||||
static inline u64 get_vmid(u64 vttbr)
|
||||
{
|
||||
return (vttbr & VTTBR_VMID_MASK(kvm_get_vmid_bits())) >>
|
||||
VTTBR_VMID_SHIFT;
|
||||
}
|
||||
|
||||
static inline bool kvm_s2_mmu_valid(struct kvm_s2_mmu *mmu)
|
||||
{
|
||||
return !(mmu->tlb_vttbr & VTTBR_CNP_BIT);
|
||||
}
|
||||
|
||||
static inline bool kvm_is_nested_s2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
|
||||
{
|
||||
/*
|
||||
* Be careful, mmu may not be fully initialised so do look at
|
||||
* *any* of its fields.
|
||||
*/
|
||||
return &kvm->arch.mmu != mmu;
|
||||
}
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
#endif /* __ARM64_KVM_MMU_H__ */
|
||||
|
@ -5,6 +5,7 @@
|
||||
#include <linux/bitfield.h>
|
||||
#include <linux/kvm_host.h>
|
||||
#include <asm/kvm_emulate.h>
|
||||
#include <asm/kvm_pgtable.h>
|
||||
|
||||
static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
@ -32,7 +33,7 @@ static inline u64 translate_tcr_el2_to_tcr_el1(u64 tcr)
|
||||
|
||||
static inline u64 translate_cptr_el2_to_cpacr_el1(u64 cptr_el2)
|
||||
{
|
||||
u64 cpacr_el1 = 0;
|
||||
u64 cpacr_el1 = CPACR_ELx_RES1;
|
||||
|
||||
if (cptr_el2 & CPTR_EL2_TTA)
|
||||
cpacr_el1 |= CPACR_ELx_TTA;
|
||||
@ -41,6 +42,8 @@ static inline u64 translate_cptr_el2_to_cpacr_el1(u64 cptr_el2)
|
||||
if (!(cptr_el2 & CPTR_EL2_TZ))
|
||||
cpacr_el1 |= CPACR_ELx_ZEN;
|
||||
|
||||
cpacr_el1 |= cptr_el2 & (CPTR_EL2_TCPAC | CPTR_EL2_TAM);
|
||||
|
||||
return cpacr_el1;
|
||||
}
|
||||
|
||||
@ -61,6 +64,125 @@ static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
|
||||
}
|
||||
|
||||
extern bool forward_smc_trap(struct kvm_vcpu *vcpu);
|
||||
extern void kvm_init_nested(struct kvm *kvm);
|
||||
extern int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu);
|
||||
extern void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu);
|
||||
extern struct kvm_s2_mmu *lookup_s2_mmu(struct kvm_vcpu *vcpu);
|
||||
|
||||
union tlbi_info;
|
||||
|
||||
extern void kvm_s2_mmu_iterate_by_vmid(struct kvm *kvm, u16 vmid,
|
||||
const union tlbi_info *info,
|
||||
void (*)(struct kvm_s2_mmu *,
|
||||
const union tlbi_info *));
|
||||
extern void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu);
|
||||
extern void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu);
|
||||
|
||||
struct kvm_s2_trans {
|
||||
phys_addr_t output;
|
||||
unsigned long block_size;
|
||||
bool writable;
|
||||
bool readable;
|
||||
int level;
|
||||
u32 esr;
|
||||
u64 upper_attr;
|
||||
};
|
||||
|
||||
static inline phys_addr_t kvm_s2_trans_output(struct kvm_s2_trans *trans)
|
||||
{
|
||||
return trans->output;
|
||||
}
|
||||
|
||||
static inline unsigned long kvm_s2_trans_size(struct kvm_s2_trans *trans)
|
||||
{
|
||||
return trans->block_size;
|
||||
}
|
||||
|
||||
static inline u32 kvm_s2_trans_esr(struct kvm_s2_trans *trans)
|
||||
{
|
||||
return trans->esr;
|
||||
}
|
||||
|
||||
static inline bool kvm_s2_trans_readable(struct kvm_s2_trans *trans)
|
||||
{
|
||||
return trans->readable;
|
||||
}
|
||||
|
||||
static inline bool kvm_s2_trans_writable(struct kvm_s2_trans *trans)
|
||||
{
|
||||
return trans->writable;
|
||||
}
|
||||
|
||||
static inline bool kvm_s2_trans_executable(struct kvm_s2_trans *trans)
|
||||
{
|
||||
return !(trans->upper_attr & BIT(54));
|
||||
}
|
||||
|
||||
extern int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
|
||||
struct kvm_s2_trans *result);
|
||||
extern int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu,
|
||||
struct kvm_s2_trans *trans);
|
||||
extern int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2);
|
||||
extern void kvm_nested_s2_wp(struct kvm *kvm);
|
||||
extern void kvm_nested_s2_unmap(struct kvm *kvm);
|
||||
extern void kvm_nested_s2_flush(struct kvm *kvm);
|
||||
|
||||
unsigned long compute_tlb_inval_range(struct kvm_s2_mmu *mmu, u64 val);
|
||||
|
||||
static inline bool kvm_supported_tlbi_s1e1_op(struct kvm_vcpu *vpcu, u32 instr)
|
||||
{
|
||||
struct kvm *kvm = vpcu->kvm;
|
||||
u8 CRm = sys_reg_CRm(instr);
|
||||
|
||||
if (!(sys_reg_Op0(instr) == TLBI_Op0 &&
|
||||
sys_reg_Op1(instr) == TLBI_Op1_EL1))
|
||||
return false;
|
||||
|
||||
if (!(sys_reg_CRn(instr) == TLBI_CRn_XS ||
|
||||
(sys_reg_CRn(instr) == TLBI_CRn_nXS &&
|
||||
kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))))
|
||||
return false;
|
||||
|
||||
if (CRm == TLBI_CRm_nROS &&
|
||||
!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
|
||||
return false;
|
||||
|
||||
if ((CRm == TLBI_CRm_RIS || CRm == TLBI_CRm_ROS ||
|
||||
CRm == TLBI_CRm_RNS) &&
|
||||
!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline bool kvm_supported_tlbi_s1e2_op(struct kvm_vcpu *vpcu, u32 instr)
|
||||
{
|
||||
struct kvm *kvm = vpcu->kvm;
|
||||
u8 CRm = sys_reg_CRm(instr);
|
||||
|
||||
if (!(sys_reg_Op0(instr) == TLBI_Op0 &&
|
||||
sys_reg_Op1(instr) == TLBI_Op1_EL2))
|
||||
return false;
|
||||
|
||||
if (!(sys_reg_CRn(instr) == TLBI_CRn_XS ||
|
||||
(sys_reg_CRn(instr) == TLBI_CRn_nXS &&
|
||||
kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))))
|
||||
return false;
|
||||
|
||||
if (CRm == TLBI_CRm_IPAIS || CRm == TLBI_CRm_IPAONS)
|
||||
return false;
|
||||
|
||||
if (CRm == TLBI_CRm_nROS &&
|
||||
!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
|
||||
return false;
|
||||
|
||||
if ((CRm == TLBI_CRm_RIS || CRm == TLBI_CRm_ROS ||
|
||||
CRm == TLBI_CRm_RNS) &&
|
||||
!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
int kvm_init_nv_sysregs(struct kvm *kvm);
|
||||
|
||||
@ -76,4 +198,11 @@ static inline bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr)
|
||||
}
|
||||
#endif
|
||||
|
||||
#define KVM_NV_GUEST_MAP_SZ (KVM_PGTABLE_PROT_SW1 | KVM_PGTABLE_PROT_SW0)
|
||||
|
||||
static inline u64 kvm_encode_nested_level(struct kvm_s2_trans *trans)
|
||||
{
|
||||
return FIELD_PREP(KVM_NV_GUEST_MAP_SZ, trans->level);
|
||||
}
|
||||
|
||||
#endif /* __ARM64_KVM_NESTED_H */
|
||||
|
@@ -654,6 +654,23 @@
#define OP_AT_S12E0W	sys_insn(AT_Op0, 4, AT_CRn, 8, 7)

/* TLBI instructions */
#define TLBI_Op0	1

#define TLBI_Op1_EL1	0	/* Accessible from EL1 or higher */
#define TLBI_Op1_EL2	4	/* Accessible from EL2 or higher */

#define TLBI_CRn_XS	8	/* Extra Slow (the common one) */
#define TLBI_CRn_nXS	9	/* not Extra Slow (which nobody uses)*/

#define TLBI_CRm_IPAIS	0	/* S2 Inner-Shareable */
#define TLBI_CRm_nROS	1	/* non-Range, Outer-Sharable */
#define TLBI_CRm_RIS	2	/* Range, Inner-Sharable */
#define TLBI_CRm_nRIS	3	/* non-Range, Inner-Sharable */
#define TLBI_CRm_IPAONS	4	/* S2 Outer and Non-Shareable */
#define TLBI_CRm_ROS	5	/* Range, Outer-Sharable */
#define TLBI_CRm_RNS	6	/* Range, Non-Sharable */
#define TLBI_CRm_nRNS	7	/* non-Range, Non-Sharable */

#define OP_TLBI_VMALLE1OS	sys_insn(1, 0, 8, 1, 0)
#define OP_TLBI_VAE1OS		sys_insn(1, 0, 8, 1, 1)
#define OP_TLBI_ASIDE1OS	sys_insn(1, 0, 8, 1, 2)
@@ -128,6 +128,7 @@ int main(void)
  DEFINE(VCPU_FAULT_DISR,	offsetof(struct kvm_vcpu, arch.fault.disr_el1));
  DEFINE(VCPU_HCR_EL2,		offsetof(struct kvm_vcpu, arch.hcr_el2));
  DEFINE(CPU_USER_PT_REGS,	offsetof(struct kvm_cpu_context, regs));
  DEFINE(CPU_ELR_EL2,		offsetof(struct kvm_cpu_context, sys_regs[ELR_EL2]));
  DEFINE(CPU_RGSR_EL1,		offsetof(struct kvm_cpu_context, sys_regs[RGSR_EL1]));
  DEFINE(CPU_GCR_EL1,		offsetof(struct kvm_cpu_context, sys_regs[GCR_EL1]));
  DEFINE(CPU_APIAKEYLO_EL1,	offsetof(struct kvm_cpu_context, sys_regs[APIAKEYLO_EL1]));
@@ -312,9 +312,7 @@ static int call_break_hook(struct pt_regs *regs, unsigned long esr)
	 * entirely not preemptible, and we can use rcu list safely here.
	 */
	list_for_each_entry_rcu(hook, list, node) {
		unsigned long comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;

		if ((comment & ~hook->mask) == hook->imm)
		if ((esr_brk_comment(esr) & ~hook->mask) == hook->imm)
			fn = hook->fn;
	}
@@ -1105,8 +1105,6 @@ static struct break_hook ubsan_break_hook = {
};
#endif

#define esr_comment(esr) ((esr) & ESR_ELx_BRK64_ISS_COMMENT_MASK)

/*
 * Initial handler for AArch64 BRK exceptions
 * This handler only used until debug_traps_init().
@@ -1115,15 +1113,15 @@ int __init early_brk64(unsigned long addr, unsigned long esr,
		       struct pt_regs *regs)
{
#ifdef CONFIG_CFI_CLANG
	if ((esr_comment(esr) & ~CFI_BRK_IMM_MASK) == CFI_BRK_IMM_BASE)
	if (esr_is_cfi_brk(esr))
		return cfi_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
#ifdef CONFIG_KASAN_SW_TAGS
	if ((esr_comment(esr) & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
	if ((esr_brk_comment(esr) & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
		return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
#ifdef CONFIG_UBSAN_TRAP
	if ((esr_comment(esr) & ~UBSAN_BRK_MASK) == UBSAN_BRK_IMM)
	if ((esr_brk_comment(esr) & ~UBSAN_BRK_MASK) == UBSAN_BRK_IMM)
		return ubsan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
	return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
@ -48,6 +48,15 @@
|
||||
|
||||
static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;
|
||||
|
||||
enum kvm_wfx_trap_policy {
|
||||
KVM_WFX_NOTRAP_SINGLE_TASK, /* Default option */
|
||||
KVM_WFX_NOTRAP,
|
||||
KVM_WFX_TRAP,
|
||||
};
|
||||
|
||||
static enum kvm_wfx_trap_policy kvm_wfi_trap_policy __read_mostly = KVM_WFX_NOTRAP_SINGLE_TASK;
|
||||
static enum kvm_wfx_trap_policy kvm_wfe_trap_policy __read_mostly = KVM_WFX_NOTRAP_SINGLE_TASK;
|
||||
|
||||
DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
|
||||
|
||||
DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
|
||||
@ -170,6 +179,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
|
||||
mutex_unlock(&kvm->lock);
|
||||
#endif
|
||||
|
||||
kvm_init_nested(kvm);
|
||||
|
||||
ret = kvm_share_hyp(kvm, kvm + 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -546,11 +557,32 @@ static void vcpu_set_pauth_traps(struct kvm_vcpu *vcpu)
|
||||
}
|
||||
}
|
||||
|
||||
static bool kvm_vcpu_should_clear_twi(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (unlikely(kvm_wfi_trap_policy != KVM_WFX_NOTRAP_SINGLE_TASK))
|
||||
return kvm_wfi_trap_policy == KVM_WFX_NOTRAP;
|
||||
|
||||
return single_task_running() &&
|
||||
(atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
|
||||
vcpu->kvm->arch.vgic.nassgireq);
|
||||
}
|
||||
|
||||
static bool kvm_vcpu_should_clear_twe(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (unlikely(kvm_wfe_trap_policy != KVM_WFX_NOTRAP_SINGLE_TASK))
|
||||
return kvm_wfe_trap_policy == KVM_WFX_NOTRAP;
|
||||
|
||||
return single_task_running();
|
||||
}
|
||||
|
||||
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
||||
{
|
||||
struct kvm_s2_mmu *mmu;
|
||||
int *last_ran;
|
||||
|
||||
if (vcpu_has_nv(vcpu))
|
||||
kvm_vcpu_load_hw_mmu(vcpu);
|
||||
|
||||
mmu = vcpu->arch.hw_mmu;
|
||||
last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
|
||||
|
||||
@ -579,10 +611,15 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
||||
if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
|
||||
kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
|
||||
|
||||
if (single_task_running())
|
||||
vcpu_clear_wfx_traps(vcpu);
|
||||
if (kvm_vcpu_should_clear_twe(vcpu))
|
||||
vcpu->arch.hcr_el2 &= ~HCR_TWE;
|
||||
else
|
||||
vcpu_set_wfx_traps(vcpu);
|
||||
vcpu->arch.hcr_el2 |= HCR_TWE;
|
||||
|
||||
if (kvm_vcpu_should_clear_twi(vcpu))
|
||||
vcpu->arch.hcr_el2 &= ~HCR_TWI;
|
||||
else
|
||||
vcpu->arch.hcr_el2 |= HCR_TWI;
|
||||
|
||||
vcpu_set_pauth_traps(vcpu);
|
||||
|
||||
@ -601,6 +638,8 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
|
||||
kvm_timer_vcpu_put(vcpu);
|
||||
kvm_vgic_put(vcpu);
|
||||
kvm_vcpu_pmu_restore_host(vcpu);
|
||||
if (vcpu_has_nv(vcpu))
|
||||
kvm_vcpu_put_hw_mmu(vcpu);
|
||||
kvm_arm_vmid_clear_active();
|
||||
|
||||
vcpu_clear_on_unsupported_cpu(vcpu);
|
||||
@ -797,7 +836,7 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
|
||||
* This needs to happen after NV has imposed its own restrictions on
|
||||
* the feature set
|
||||
*/
|
||||
kvm_init_sysreg(vcpu);
|
||||
kvm_calculate_traps(vcpu);
|
||||
|
||||
ret = kvm_timer_enable(vcpu);
|
||||
if (ret)
|
||||
@ -1419,11 +1458,6 @@ static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
|
||||
test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features))
|
||||
return -EINVAL;
|
||||
|
||||
/* Disallow NV+SVE for the time being */
|
||||
if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features) &&
|
||||
test_bit(KVM_ARM_VCPU_SVE, &features))
|
||||
return -EINVAL;
|
||||
|
||||
if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features))
|
||||
return 0;
|
||||
|
||||
@ -1459,6 +1493,10 @@ static int kvm_setup_vcpu(struct kvm_vcpu *vcpu)
|
||||
if (kvm_vcpu_has_pmu(vcpu) && !kvm->arch.arm_pmu)
|
||||
ret = kvm_arm_set_default_pmu(kvm);
|
||||
|
||||
/* Prepare for nested if required */
|
||||
if (!ret && vcpu_has_nv(vcpu))
|
||||
ret = kvm_vcpu_init_nested(vcpu);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -2858,6 +2896,36 @@ static int __init early_kvm_mode_cfg(char *arg)
|
||||
}
|
||||
early_param("kvm-arm.mode", early_kvm_mode_cfg);
|
||||
|
||||
static int __init early_kvm_wfx_trap_policy_cfg(char *arg, enum kvm_wfx_trap_policy *p)
|
||||
{
|
||||
if (!arg)
|
||||
return -EINVAL;
|
||||
|
||||
if (strcmp(arg, "trap") == 0) {
|
||||
*p = KVM_WFX_TRAP;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (strcmp(arg, "notrap") == 0) {
|
||||
*p = KVM_WFX_NOTRAP;
|
||||
return 0;
|
||||
}
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int __init early_kvm_wfi_trap_policy_cfg(char *arg)
|
||||
{
|
||||
return early_kvm_wfx_trap_policy_cfg(arg, &kvm_wfi_trap_policy);
|
||||
}
|
||||
early_param("kvm-arm.wfi_trap_policy", early_kvm_wfi_trap_policy_cfg);
|
||||
|
||||
static int __init early_kvm_wfe_trap_policy_cfg(char *arg)
|
||||
{
|
||||
return early_kvm_wfx_trap_policy_cfg(arg, &kvm_wfe_trap_policy);
|
||||
}
|
||||
early_param("kvm-arm.wfe_trap_policy", early_kvm_wfe_trap_policy_cfg);
|
||||
|
||||
enum kvm_mode kvm_get_mode(void)
|
||||
{
|
||||
return kvm_mode;
|
||||
|
@ -79,6 +79,12 @@ enum cgt_group_id {
|
||||
CGT_MDCR_E2TB,
|
||||
CGT_MDCR_TDCC,
|
||||
|
||||
CGT_CPACR_E0POE,
|
||||
CGT_CPTR_TAM,
|
||||
CGT_CPTR_TCPAC,
|
||||
|
||||
CGT_HCRX_TCR2En,
|
||||
|
||||
/*
|
||||
* Anything after this point is a combination of coarse trap
|
||||
* controls, which must all be evaluated to decide what to do.
|
||||
@ -89,6 +95,7 @@ enum cgt_group_id {
|
||||
CGT_HCR_TTLB_TTLBIS,
|
||||
CGT_HCR_TTLB_TTLBOS,
|
||||
CGT_HCR_TVM_TRVM,
|
||||
CGT_HCR_TVM_TRVM_HCRX_TCR2En,
|
||||
CGT_HCR_TPU_TICAB,
|
||||
CGT_HCR_TPU_TOCU,
|
||||
CGT_HCR_NV1_nNV2_ENSCXT,
|
||||
@ -106,6 +113,8 @@ enum cgt_group_id {
|
||||
CGT_CNTHCTL_EL1PCTEN = __COMPLEX_CONDITIONS__,
|
||||
CGT_CNTHCTL_EL1PTEN,
|
||||
|
||||
CGT_CPTR_TTA,
|
||||
|
||||
/* Must be last */
|
||||
__NR_CGT_GROUP_IDS__
|
||||
};
|
||||
@ -345,6 +354,30 @@ static const struct trap_bits coarse_trap_bits[] = {
|
||||
.mask = MDCR_EL2_TDCC,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
},
|
||||
[CGT_CPACR_E0POE] = {
|
||||
.index = CPTR_EL2,
|
||||
.value = CPACR_ELx_E0POE,
|
||||
.mask = CPACR_ELx_E0POE,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
},
|
||||
[CGT_CPTR_TAM] = {
|
||||
.index = CPTR_EL2,
|
||||
.value = CPTR_EL2_TAM,
|
||||
.mask = CPTR_EL2_TAM,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
},
|
||||
[CGT_CPTR_TCPAC] = {
|
||||
.index = CPTR_EL2,
|
||||
.value = CPTR_EL2_TCPAC,
|
||||
.mask = CPTR_EL2_TCPAC,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
},
|
||||
[CGT_HCRX_TCR2En] = {
|
||||
.index = HCRX_EL2,
|
||||
.value = 0,
|
||||
.mask = HCRX_EL2_TCR2En,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
},
|
||||
};
|
||||
|
||||
#define MCB(id, ...) \
|
||||
@ -359,6 +392,8 @@ static const enum cgt_group_id *coarse_control_combo[] = {
|
||||
MCB(CGT_HCR_TTLB_TTLBIS, CGT_HCR_TTLB, CGT_HCR_TTLBIS),
|
||||
MCB(CGT_HCR_TTLB_TTLBOS, CGT_HCR_TTLB, CGT_HCR_TTLBOS),
|
||||
MCB(CGT_HCR_TVM_TRVM, CGT_HCR_TVM, CGT_HCR_TRVM),
|
||||
MCB(CGT_HCR_TVM_TRVM_HCRX_TCR2En,
|
||||
CGT_HCR_TVM, CGT_HCR_TRVM, CGT_HCRX_TCR2En),
|
||||
MCB(CGT_HCR_TPU_TICAB, CGT_HCR_TPU, CGT_HCR_TICAB),
|
||||
MCB(CGT_HCR_TPU_TOCU, CGT_HCR_TPU, CGT_HCR_TOCU),
|
||||
MCB(CGT_HCR_NV1_nNV2_ENSCXT, CGT_HCR_NV1_nNV2, CGT_HCR_ENSCXT),
|
||||
@ -410,12 +445,26 @@ static enum trap_behaviour check_cnthctl_el1pten(struct kvm_vcpu *vcpu)
|
||||
return BEHAVE_FORWARD_ANY;
|
||||
}
|
||||
|
||||
static enum trap_behaviour check_cptr_tta(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u64 val = __vcpu_sys_reg(vcpu, CPTR_EL2);
|
||||
|
||||
if (!vcpu_el2_e2h_is_set(vcpu))
|
||||
val = translate_cptr_el2_to_cpacr_el1(val);
|
||||
|
||||
if (val & CPACR_ELx_TTA)
|
||||
return BEHAVE_FORWARD_ANY;
|
||||
|
||||
return BEHAVE_HANDLE_LOCALLY;
|
||||
}
|
||||
|
||||
#define CCC(id, fn) \
|
||||
[id - __COMPLEX_CONDITIONS__] = fn
|
||||
|
||||
static const complex_condition_check ccc[] = {
|
||||
CCC(CGT_CNTHCTL_EL1PCTEN, check_cnthctl_el1pcten),
|
||||
CCC(CGT_CNTHCTL_EL1PTEN, check_cnthctl_el1pten),
|
||||
CCC(CGT_CPTR_TTA, check_cptr_tta),
|
||||
};
|
||||
|
||||
/*
|
||||
@ -622,6 +671,7 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
|
||||
SR_TRAP(SYS_MAIR_EL1, CGT_HCR_TVM_TRVM),
|
||||
SR_TRAP(SYS_AMAIR_EL1, CGT_HCR_TVM_TRVM),
|
||||
SR_TRAP(SYS_CONTEXTIDR_EL1, CGT_HCR_TVM_TRVM),
|
||||
SR_TRAP(SYS_TCR2_EL1, CGT_HCR_TVM_TRVM_HCRX_TCR2En),
|
||||
SR_TRAP(SYS_DC_ZVA, CGT_HCR_TDZ),
|
||||
SR_TRAP(SYS_DC_GVA, CGT_HCR_TDZ),
|
||||
SR_TRAP(SYS_DC_GZVA, CGT_HCR_TDZ),
|
||||
@ -1000,6 +1050,59 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
|
||||
SR_TRAP(SYS_TRBPTR_EL1, CGT_MDCR_E2TB),
|
||||
SR_TRAP(SYS_TRBSR_EL1, CGT_MDCR_E2TB),
|
||||
SR_TRAP(SYS_TRBTRG_EL1, CGT_MDCR_E2TB),
|
||||
SR_TRAP(SYS_CPACR_EL1, CGT_CPTR_TCPAC),
|
||||
SR_TRAP(SYS_AMUSERENR_EL0, CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMCFGR_EL0, CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMCGCR_EL0, CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMCNTENCLR0_EL0, CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMCNTENCLR1_EL0, CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMCNTENSET0_EL0, CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMCNTENSET1_EL0, CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMCR_EL0, CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVCNTR0_EL0(0), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVCNTR0_EL0(1), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVCNTR0_EL0(2), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVCNTR0_EL0(3), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVCNTR1_EL0(0), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVCNTR1_EL0(1), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVCNTR1_EL0(2), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVCNTR1_EL0(3), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVCNTR1_EL0(4), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVCNTR1_EL0(5), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVCNTR1_EL0(6), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVCNTR1_EL0(7), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVCNTR1_EL0(8), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVCNTR1_EL0(9), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVCNTR1_EL0(10), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVCNTR1_EL0(11), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVCNTR1_EL0(12), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVCNTR1_EL0(13), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVCNTR1_EL0(14), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVCNTR1_EL0(15), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVTYPER0_EL0(0), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVTYPER0_EL0(1), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVTYPER0_EL0(2), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVTYPER0_EL0(3), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVTYPER1_EL0(0), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVTYPER1_EL0(1), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVTYPER1_EL0(2), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVTYPER1_EL0(3), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVTYPER1_EL0(4), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVTYPER1_EL0(5), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVTYPER1_EL0(6), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVTYPER1_EL0(7), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVTYPER1_EL0(8), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVTYPER1_EL0(9), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVTYPER1_EL0(10), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVTYPER1_EL0(11), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVTYPER1_EL0(12), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVTYPER1_EL0(13), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVTYPER1_EL0(14), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_AMEVTYPER1_EL0(15), CGT_CPTR_TAM),
|
||||
SR_TRAP(SYS_POR_EL0, CGT_CPACR_E0POE),
|
||||
/* op0=2, op1=1, and CRn<0b1000 */
|
||||
SR_RANGE_TRAP(sys_reg(2, 1, 0, 0, 0),
|
||||
sys_reg(2, 1, 7, 15, 7), CGT_CPTR_TTA),
|
||||
SR_TRAP(SYS_CNTP_TVAL_EL0, CGT_CNTHCTL_EL1PTEN),
|
||||
SR_TRAP(SYS_CNTP_CVAL_EL0, CGT_CNTHCTL_EL1PTEN),
|
||||
SR_TRAP(SYS_CNTP_CTL_EL0, CGT_CNTHCTL_EL1PTEN),
|
||||
@ -1071,6 +1174,7 @@ static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = {
|
||||
SR_FGT(SYS_TPIDRRO_EL0, HFGxTR, TPIDRRO_EL0, 1),
|
||||
SR_FGT(SYS_TPIDR_EL1, HFGxTR, TPIDR_EL1, 1),
|
||||
SR_FGT(SYS_TCR_EL1, HFGxTR, TCR_EL1, 1),
|
||||
SR_FGT(SYS_TCR2_EL1, HFGxTR, TCR_EL1, 1),
|
||||
SR_FGT(SYS_SCXTNUM_EL0, HFGxTR, SCXTNUM_EL0, 1),
|
||||
SR_FGT(SYS_SCXTNUM_EL1, HFGxTR, SCXTNUM_EL1, 1),
|
||||
SR_FGT(SYS_SCTLR_EL1, HFGxTR, SCTLR_EL1, 1),
|
||||
|
@@ -178,7 +178,13 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)

	if (guest_owns_fp_regs()) {
		if (vcpu_has_sve(vcpu)) {
			__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
			u64 zcr = read_sysreg_el1(SYS_ZCR);

			/*
			 * If the vCPU is in the hyp context then ZCR_EL1 is
			 * loaded with its vEL2 counterpart.
			 */
			__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr;

			/*
			 * Restore the VL that was saved when bound to the CPU,
@@ -189,11 +195,14 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
			 * Note that this means that at guest exit ZCR_EL1 is
			 * not necessarily the same as on guest entry.
			 *
			 * Restoring the VL isn't needed in VHE mode since
			 * ZCR_EL2 (accessed via ZCR_EL1) would fulfill the same
			 * role when doing the save from EL2.
			 * ZCR_EL2 holds the guest hypervisor's VL when running
			 * a nested guest, which could be smaller than the
			 * max for the vCPU. Similar to above, we first need to
			 * switch to a VL consistent with the layout of the
			 * vCPU's SVE state. KVM support for NV implies VHE, so
			 * using the ZCR_EL1 alias is safe.
			 */
			if (!has_vhe())
			if (!has_vhe() || (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)))
				sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
						       SYS_ZCR_EL1);
		}
@ -94,11 +94,19 @@ static int handle_smc(struct kvm_vcpu *vcpu)
|
||||
}
|
||||
|
||||
/*
|
||||
* Guest access to FP/ASIMD registers are routed to this handler only
|
||||
* when the system doesn't support FP/ASIMD.
|
||||
* This handles the cases where the system does not support FP/ASIMD or when
|
||||
* we are running nested virtualization and the guest hypervisor is trapping
|
||||
* FP/ASIMD accesses by its guest guest.
|
||||
*
|
||||
* All other handling of guest vs. host FP/ASIMD register state is handled in
|
||||
* fixup_guest_exit().
|
||||
*/
|
||||
static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
|
||||
static int kvm_handle_fpasimd(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (guest_hyp_fpsimd_traps_enabled(vcpu))
|
||||
return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
|
||||
|
||||
/* This is the case when the system doesn't support FP/ASIMD. */
|
||||
kvm_inject_undefined(vcpu);
|
||||
return 1;
|
||||
}
|
||||
@ -209,6 +217,9 @@ static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
|
||||
*/
|
||||
static int handle_sve(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (guest_hyp_sve_traps_enabled(vcpu))
|
||||
return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
|
||||
|
||||
kvm_inject_undefined(vcpu);
|
||||
return 1;
|
||||
}
|
||||
@ -304,7 +315,7 @@ static exit_handle_fn arm_exit_handlers[] = {
|
||||
[ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
|
||||
[ESR_ELx_EC_BKPT32] = kvm_handle_guest_debug,
|
||||
[ESR_ELx_EC_BRK64] = kvm_handle_guest_debug,
|
||||
[ESR_ELx_EC_FP_ASIMD] = handle_no_fpsimd,
|
||||
[ESR_ELx_EC_FP_ASIMD] = kvm_handle_fpasimd,
|
||||
[ESR_ELx_EC_PAC] = kvm_handle_ptrauth,
|
||||
};
|
||||
|
||||
@ -411,6 +422,20 @@ void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
|
||||
kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
|
||||
}
|
||||
|
||||
static void print_nvhe_hyp_panic(const char *name, u64 panic_addr)
|
||||
{
|
||||
kvm_err("nVHE hyp %s at: [<%016llx>] %pB!\n", name, panic_addr,
|
||||
(void *)(panic_addr + kaslr_offset()));
|
||||
}
|
||||
|
||||
static void kvm_nvhe_report_cfi_failure(u64 panic_addr)
|
||||
{
|
||||
print_nvhe_hyp_panic("CFI failure", panic_addr);
|
||||
|
||||
if (IS_ENABLED(CONFIG_CFI_PERMISSIVE))
|
||||
kvm_err(" (CONFIG_CFI_PERMISSIVE ignored for hyp failures)\n");
|
||||
}
|
||||
|
||||
void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
|
||||
u64 elr_virt, u64 elr_phys,
|
||||
u64 par, uintptr_t vcpu,
|
||||
@ -423,7 +448,7 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
|
||||
if (mode != PSR_MODE_EL2t && mode != PSR_MODE_EL2h) {
|
||||
kvm_err("Invalid host exception to nVHE hyp!\n");
|
||||
} else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
|
||||
(esr & ESR_ELx_BRK64_ISS_COMMENT_MASK) == BUG_BRK_IMM) {
|
||||
esr_brk_comment(esr) == BUG_BRK_IMM) {
|
||||
const char *file = NULL;
|
||||
unsigned int line = 0;
|
||||
|
||||
@ -439,11 +464,11 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
|
||||
if (file)
|
||||
kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);
|
||||
else
|
||||
kvm_err("nVHE hyp BUG at: [<%016llx>] %pB!\n", panic_addr,
|
||||
(void *)(panic_addr + kaslr_offset()));
|
||||
print_nvhe_hyp_panic("BUG", panic_addr);
|
||||
} else if (IS_ENABLED(CONFIG_CFI_CLANG) && esr_is_cfi_brk(esr)) {
|
||||
kvm_nvhe_report_cfi_failure(panic_addr);
|
||||
} else {
|
||||
kvm_err("nVHE hyp panic at: [<%016llx>] %pB!\n", panic_addr,
|
||||
(void *)(panic_addr + kaslr_offset()));
|
||||
print_nvhe_hyp_panic("panic", panic_addr);
|
||||
}
|
||||
|
||||
/* Dump the nVHE hypervisor backtrace */
|
||||
|
@@ -83,6 +83,14 @@ alternative_else_nop_endif
	eret
	sb

SYM_INNER_LABEL(__guest_exit_restore_elr_and_panic, SYM_L_GLOBAL)
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

	adr_this_cpu x0, kvm_hyp_ctxt, x1
	ldr	x0, [x0, #CPU_ELR_EL2]
	msr	elr_el2, x0

SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack
@ -314,11 +314,24 @@ static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
|
||||
static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
/*
|
||||
* The vCPU's saved SVE state layout always matches the max VL of the
|
||||
* vCPU. Start off with the max VL so we can load the SVE state.
|
||||
*/
|
||||
sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
|
||||
__sve_restore_state(vcpu_sve_pffr(vcpu),
|
||||
&vcpu->arch.ctxt.fp_regs.fpsr,
|
||||
true);
|
||||
write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
|
||||
|
||||
/*
|
||||
* The effective VL for a VM could differ from the max VL when running a
|
||||
* nested guest, as the guest hypervisor could select a smaller VL. Slap
|
||||
* that into hardware before wrapping up.
|
||||
*/
|
||||
if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))
|
||||
sve_cond_update_zcr_vq(__vcpu_sys_reg(vcpu, ZCR_EL2), SYS_ZCR_EL2);
|
||||
|
||||
write_sysreg_el1(__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)), SYS_ZCR);
|
||||
}
|
||||
|
||||
static inline void __hyp_sve_save_host(void)
|
||||
@ -354,10 +367,19 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
/* Only handle traps the vCPU can support here: */
|
||||
switch (esr_ec) {
|
||||
case ESR_ELx_EC_FP_ASIMD:
|
||||
/* Forward traps to the guest hypervisor as required */
|
||||
if (guest_hyp_fpsimd_traps_enabled(vcpu))
|
||||
return false;
|
||||
break;
|
||||
case ESR_ELx_EC_SYS64:
|
||||
if (WARN_ON_ONCE(!is_hyp_ctxt(vcpu)))
|
||||
return false;
|
||||
fallthrough;
|
||||
case ESR_ELx_EC_SVE:
|
||||
if (!sve_guest)
|
||||
return false;
|
||||
if (guest_hyp_sve_traps_enabled(vcpu))
|
||||
return false;
|
||||
break;
|
||||
default:
|
||||
return false;
|
||||
@ -693,7 +715,7 @@ guest:
|
||||
|
||||
static inline void __kvm_unexpected_el2_exception(void)
|
||||
{
|
||||
extern char __guest_exit_panic[];
|
||||
extern char __guest_exit_restore_elr_and_panic[];
|
||||
unsigned long addr, fixup;
|
||||
struct kvm_exception_table_entry *entry, *end;
|
||||
unsigned long elr_el2 = read_sysreg(elr_el2);
|
||||
@ -715,7 +737,8 @@ static inline void __kvm_unexpected_el2_exception(void)
|
||||
}
|
||||
|
||||
/* Trigger a panic after restoring the hyp context. */
|
||||
write_sysreg(__guest_exit_panic, elr_el2);
|
||||
this_cpu_ptr(&kvm_hyp_ctxt)->sys_regs[ELR_EL2] = elr_el2;
|
||||
write_sysreg(__guest_exit_restore_elr_and_panic, elr_el2);
|
||||
}
|
||||
|
||||
#endif /* __ARM64_KVM_HYP_SWITCH_H__ */
|
||||
|
@ -55,6 +55,17 @@ static inline bool ctxt_has_s1pie(struct kvm_cpu_context *ctxt)
|
||||
return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64MMFR3_EL1, S1PIE, IMP);
|
||||
}
|
||||
|
||||
static inline bool ctxt_has_tcrx(struct kvm_cpu_context *ctxt)
|
||||
{
|
||||
struct kvm_vcpu *vcpu;
|
||||
|
||||
if (!cpus_have_final_cap(ARM64_HAS_TCR2))
|
||||
return false;
|
||||
|
||||
vcpu = ctxt_to_vcpu(ctxt);
|
||||
return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64MMFR3_EL1, TCRX, IMP);
|
||||
}
|
||||
|
||||
static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
|
||||
{
|
||||
ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR);
|
||||
@ -62,8 +73,14 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
|
||||
ctxt_sys_reg(ctxt, TTBR0_EL1) = read_sysreg_el1(SYS_TTBR0);
|
||||
ctxt_sys_reg(ctxt, TTBR1_EL1) = read_sysreg_el1(SYS_TTBR1);
|
||||
ctxt_sys_reg(ctxt, TCR_EL1) = read_sysreg_el1(SYS_TCR);
|
||||
if (cpus_have_final_cap(ARM64_HAS_TCR2))
|
||||
if (ctxt_has_tcrx(ctxt)) {
|
||||
ctxt_sys_reg(ctxt, TCR2_EL1) = read_sysreg_el1(SYS_TCR2);
|
||||
|
||||
if (ctxt_has_s1pie(ctxt)) {
|
||||
ctxt_sys_reg(ctxt, PIR_EL1) = read_sysreg_el1(SYS_PIR);
|
||||
ctxt_sys_reg(ctxt, PIRE0_EL1) = read_sysreg_el1(SYS_PIRE0);
|
||||
}
|
||||
}
|
||||
ctxt_sys_reg(ctxt, ESR_EL1) = read_sysreg_el1(SYS_ESR);
|
||||
ctxt_sys_reg(ctxt, AFSR0_EL1) = read_sysreg_el1(SYS_AFSR0);
|
||||
ctxt_sys_reg(ctxt, AFSR1_EL1) = read_sysreg_el1(SYS_AFSR1);
|
||||
@ -73,10 +90,6 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
|
||||
ctxt_sys_reg(ctxt, CONTEXTIDR_EL1) = read_sysreg_el1(SYS_CONTEXTIDR);
|
||||
ctxt_sys_reg(ctxt, AMAIR_EL1) = read_sysreg_el1(SYS_AMAIR);
|
||||
ctxt_sys_reg(ctxt, CNTKCTL_EL1) = read_sysreg_el1(SYS_CNTKCTL);
|
||||
if (ctxt_has_s1pie(ctxt)) {
|
||||
ctxt_sys_reg(ctxt, PIR_EL1) = read_sysreg_el1(SYS_PIR);
|
||||
ctxt_sys_reg(ctxt, PIRE0_EL1) = read_sysreg_el1(SYS_PIRE0);
|
||||
}
|
||||
ctxt_sys_reg(ctxt, PAR_EL1) = read_sysreg_par();
|
||||
ctxt_sys_reg(ctxt, TPIDR_EL1) = read_sysreg(tpidr_el1);
|
||||
|
||||
@ -138,8 +151,14 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
|
||||
write_sysreg_el1(ctxt_sys_reg(ctxt, CPACR_EL1), SYS_CPACR);
|
||||
write_sysreg_el1(ctxt_sys_reg(ctxt, TTBR0_EL1), SYS_TTBR0);
|
||||
write_sysreg_el1(ctxt_sys_reg(ctxt, TTBR1_EL1), SYS_TTBR1);
|
||||
if (cpus_have_final_cap(ARM64_HAS_TCR2))
|
||||
if (ctxt_has_tcrx(ctxt)) {
|
||||
write_sysreg_el1(ctxt_sys_reg(ctxt, TCR2_EL1), SYS_TCR2);
|
||||
|
||||
if (ctxt_has_s1pie(ctxt)) {
|
||||
write_sysreg_el1(ctxt_sys_reg(ctxt, PIR_EL1), SYS_PIR);
|
||||
write_sysreg_el1(ctxt_sys_reg(ctxt, PIRE0_EL1), SYS_PIRE0);
|
||||
}
|
||||
}
|
||||
write_sysreg_el1(ctxt_sys_reg(ctxt, ESR_EL1), SYS_ESR);
|
||||
write_sysreg_el1(ctxt_sys_reg(ctxt, AFSR0_EL1), SYS_AFSR0);
|
||||
write_sysreg_el1(ctxt_sys_reg(ctxt, AFSR1_EL1), SYS_AFSR1);
|
||||
@ -149,10 +168,6 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
|
||||
write_sysreg_el1(ctxt_sys_reg(ctxt, CONTEXTIDR_EL1), SYS_CONTEXTIDR);
|
||||
write_sysreg_el1(ctxt_sys_reg(ctxt, AMAIR_EL1), SYS_AMAIR);
|
||||
write_sysreg_el1(ctxt_sys_reg(ctxt, CNTKCTL_EL1), SYS_CNTKCTL);
|
||||
if (ctxt_has_s1pie(ctxt)) {
|
||||
write_sysreg_el1(ctxt_sys_reg(ctxt, PIR_EL1), SYS_PIR);
|
||||
write_sysreg_el1(ctxt_sys_reg(ctxt, PIRE0_EL1), SYS_PIRE0);
|
||||
}
|
||||
write_sysreg(ctxt_sys_reg(ctxt, PAR_EL1), par_el1);
|
||||
write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL1), tpidr_el1);
|
||||
|
||||
|
@@ -9,7 +9,7 @@
#include <asm/kvm_host.h>

#define FFA_MIN_FUNC_NUM 0x60
#define FFA_MAX_FUNC_NUM 0x7F
#define FFA_MAX_FUNC_NUM 0xFF

int hyp_ffa_init(void *pages);
bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);
@@ -89,9 +89,9 @@ quiet_cmd_hyprel = HYPREL $@
quiet_cmd_hypcopy = HYPCOPY $@
      cmd_hypcopy = $(OBJCOPY) --prefix-symbols=__kvm_nvhe_ $< $@

# Remove ftrace, Shadow Call Stack, and CFI CFLAGS.
# This is equivalent to the 'notrace', '__noscs', and '__nocfi' annotations.
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS) $(CC_FLAGS_CFI), $(KBUILD_CFLAGS))
# Remove ftrace and Shadow Call Stack CFLAGS.
# This is equivalent to the 'notrace' and '__noscs' annotations.
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
# Starting from 13.0.0 llvm emits SHT_REL section '.llvm.call-graph-profile'
# when profile optimization is applied. gen-hyprel does not support SHT_REL and
# causes a build failure. Remove profile optimization flags.
@ -67,6 +67,9 @@ struct kvm_ffa_buffers {
|
||||
*/
|
||||
static struct kvm_ffa_buffers hyp_buffers;
|
||||
static struct kvm_ffa_buffers host_buffers;
|
||||
static u32 hyp_ffa_version;
|
||||
static bool has_version_negotiated;
|
||||
static hyp_spinlock_t version_lock;
|
||||
|
||||
static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno)
|
||||
{
|
||||
@ -462,7 +465,7 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
|
||||
memcpy(buf, host_buffers.tx, fraglen);
|
||||
|
||||
ep_mem_access = (void *)buf +
|
||||
ffa_mem_desc_offset(buf, 0, FFA_VERSION_1_0);
|
||||
ffa_mem_desc_offset(buf, 0, hyp_ffa_version);
|
||||
offset = ep_mem_access->composite_off;
|
||||
if (!offset || buf->ep_count != 1 || buf->sender_id != HOST_FFA_ID) {
|
||||
ret = FFA_RET_INVALID_PARAMETERS;
|
||||
@ -541,7 +544,7 @@ static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
|
||||
fraglen = res->a2;
|
||||
|
||||
ep_mem_access = (void *)buf +
|
||||
ffa_mem_desc_offset(buf, 0, FFA_VERSION_1_0);
|
||||
ffa_mem_desc_offset(buf, 0, hyp_ffa_version);
|
||||
offset = ep_mem_access->composite_off;
|
||||
/*
|
||||
* We can trust the SPMD to get this right, but let's at least
|
||||
@ -651,91 +654,10 @@ out_handled:
|
||||
return true;
|
||||
}
|
||||
|
||||
bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
|
||||
static int hyp_ffa_post_init(void)
|
||||
{
|
||||
struct arm_smccc_res res;
|
||||
|
||||
/*
|
||||
* There's no way we can tell what a non-standard SMC call might
|
||||
* be up to. Ideally, we would terminate these here and return
|
||||
* an error to the host, but sadly devices make use of custom
|
||||
* firmware calls for things like power management, debugging,
|
||||
* RNG access and crash reporting.
|
||||
*
|
||||
* Given that the architecture requires us to trust EL3 anyway,
|
||||
* we forward unrecognised calls on under the assumption that
|
||||
* the firmware doesn't expose a mechanism to access arbitrary
|
||||
* non-secure memory. Short of a per-device table of SMCs, this
|
||||
* is the best we can do.
|
||||
*/
|
||||
if (!is_ffa_call(func_id))
|
||||
return false;
|
||||
|
||||
switch (func_id) {
|
||||
case FFA_FEATURES:
|
||||
if (!do_ffa_features(&res, host_ctxt))
|
||||
return false;
|
||||
goto out_handled;
|
||||
/* Memory management */
|
||||
case FFA_FN64_RXTX_MAP:
|
||||
do_ffa_rxtx_map(&res, host_ctxt);
|
||||
goto out_handled;
|
||||
case FFA_RXTX_UNMAP:
|
||||
do_ffa_rxtx_unmap(&res, host_ctxt);
|
||||
goto out_handled;
|
||||
case FFA_MEM_SHARE:
|
||||
case FFA_FN64_MEM_SHARE:
|
||||
do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);
|
||||
goto out_handled;
|
||||
case FFA_MEM_RECLAIM:
|
||||
do_ffa_mem_reclaim(&res, host_ctxt);
|
||||
goto out_handled;
|
||||
case FFA_MEM_LEND:
|
||||
case FFA_FN64_MEM_LEND:
|
||||
do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt);
|
||||
goto out_handled;
|
||||
case FFA_MEM_FRAG_TX:
|
||||
do_ffa_mem_frag_tx(&res, host_ctxt);
|
||||
goto out_handled;
|
||||
}
|
||||
|
||||
if (ffa_call_supported(func_id))
|
||||
return false; /* Pass through */
|
||||
|
||||
ffa_to_smccc_error(&res, FFA_RET_NOT_SUPPORTED);
|
||||
out_handled:
|
||||
ffa_set_retval(host_ctxt, &res);
|
||||
return true;
|
||||
}
|
||||
|
||||
int hyp_ffa_init(void *pages)
|
||||
{
|
||||
struct arm_smccc_res res;
|
||||
size_t min_rxtx_sz;
|
||||
void *tx, *rx;
|
||||
|
||||
if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_2)
|
||||
return 0;
|
||||
|
||||
arm_smccc_1_1_smc(FFA_VERSION, FFA_VERSION_1_0, 0, 0, 0, 0, 0, 0, &res);
|
||||
if (res.a0 == FFA_RET_NOT_SUPPORTED)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Firmware returns the maximum supported version of the FF-A
|
||||
* implementation. Check that the returned version is
|
||||
* backwards-compatible with the hyp according to the rules in DEN0077A
|
||||
* v1.1 REL0 13.2.1.
|
||||
*
|
||||
* Of course, things are never simple when dealing with firmware. v1.1
|
||||
* broke ABI with v1.0 on several structures, which is itself
|
||||
* incompatible with the aforementioned versioning scheme. The
|
||||
* expectation is that v1.x implementations that do not support the v1.0
|
||||
* ABI return NOT_SUPPORTED rather than a version number, according to
|
||||
* DEN0077A v1.1 REL0 18.6.4.
|
||||
*/
|
||||
if (FFA_MAJOR_VERSION(res.a0) != 1)
|
||||
return -EOPNOTSUPP;
|
||||
struct arm_smccc_res res;
|
||||
|
||||
arm_smccc_1_1_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0, &res);
|
||||
if (res.a0 != FFA_SUCCESS)
|
||||
@@ -766,6 +688,199 @@ int hyp_ffa_init(void *pages)
|
||||
if (min_rxtx_sz > PAGE_SIZE)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void do_ffa_version(struct arm_smccc_res *res,
|
||||
struct kvm_cpu_context *ctxt)
|
||||
{
|
||||
DECLARE_REG(u32, ffa_req_version, ctxt, 1);
|
||||
|
||||
if (FFA_MAJOR_VERSION(ffa_req_version) != 1) {
|
||||
res->a0 = FFA_RET_NOT_SUPPORTED;
|
||||
return;
|
||||
}
|
||||
|
||||
hyp_spin_lock(&version_lock);
|
||||
if (has_version_negotiated) {
|
||||
res->a0 = hyp_ffa_version;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the client driver tries to downgrade the version, we need to ask
|
||||
* first if TEE supports it.
|
||||
*/
|
||||
if (FFA_MINOR_VERSION(ffa_req_version) < FFA_MINOR_VERSION(hyp_ffa_version)) {
|
||||
arm_smccc_1_1_smc(FFA_VERSION, ffa_req_version, 0,
|
||||
0, 0, 0, 0, 0,
|
||||
res);
|
||||
if (res->a0 == FFA_RET_NOT_SUPPORTED)
|
||||
goto unlock;
|
||||
|
||||
hyp_ffa_version = ffa_req_version;
|
||||
}
|
||||
|
||||
if (hyp_ffa_post_init())
|
||||
res->a0 = FFA_RET_NOT_SUPPORTED;
|
||||
else {
|
||||
has_version_negotiated = true;
|
||||
res->a0 = hyp_ffa_version;
|
||||
}
|
||||
unlock:
|
||||
hyp_spin_unlock(&version_lock);
|
||||
}
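/*
 * Editor's illustrative sketch, not part of this commit: the DEN0077A
 * compatibility rule that do_ffa_version() above relies on, written out
 * as a hypothetical helper. Two FF-A versions are compatible when the
 * major numbers match and the callee supports at least the requested
 * minor; a request for a lower minor than the one the hypervisor
 * negotiated is only honoured if the firmware also supports it.
 */
static inline bool ffa_versions_compatible(u32 requested, u32 supported)
{
	if (FFA_MAJOR_VERSION(requested) != FFA_MAJOR_VERSION(supported))
		return false;

	return FFA_MINOR_VERSION(requested) <= FFA_MINOR_VERSION(supported);
}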
|
||||
|
||||
static void do_ffa_part_get(struct arm_smccc_res *res,
|
||||
struct kvm_cpu_context *ctxt)
|
||||
{
|
||||
DECLARE_REG(u32, uuid0, ctxt, 1);
|
||||
DECLARE_REG(u32, uuid1, ctxt, 2);
|
||||
DECLARE_REG(u32, uuid2, ctxt, 3);
|
||||
DECLARE_REG(u32, uuid3, ctxt, 4);
|
||||
DECLARE_REG(u32, flags, ctxt, 5);
|
||||
u32 count, partition_sz, copy_sz;
|
||||
|
||||
hyp_spin_lock(&host_buffers.lock);
|
||||
if (!host_buffers.rx) {
|
||||
ffa_to_smccc_res(res, FFA_RET_BUSY);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
arm_smccc_1_1_smc(FFA_PARTITION_INFO_GET, uuid0, uuid1,
|
||||
uuid2, uuid3, flags, 0, 0,
|
||||
res);
|
||||
|
||||
if (res->a0 != FFA_SUCCESS)
|
||||
goto out_unlock;
|
||||
|
||||
count = res->a2;
|
||||
if (!count)
|
||||
goto out_unlock;
|
||||
|
||||
if (hyp_ffa_version > FFA_VERSION_1_0) {
|
||||
/* Get the number of partitions deployed in the system */
|
||||
if (flags & 0x1)
|
||||
goto out_unlock;
|
||||
|
||||
partition_sz = res->a3;
|
||||
} else {
|
||||
/* FFA_VERSION_1_0 lacks the size in the response */
|
||||
partition_sz = FFA_1_0_PARTITON_INFO_SZ;
|
||||
}
|
||||
|
||||
copy_sz = partition_sz * count;
|
||||
if (copy_sz > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
|
||||
ffa_to_smccc_res(res, FFA_RET_ABORTED);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
memcpy(host_buffers.rx, hyp_buffers.rx, copy_sz);
|
||||
out_unlock:
|
||||
hyp_spin_unlock(&host_buffers.lock);
|
||||
}
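/*
 * Editor's illustrative sketch, not part of this commit: how the
 * per-entry size used for copy_sz above depends on the negotiated FF-A
 * version. ffa_part_desc_size() is a made-up helper; the v1.1 "size in
 * a3 unless the count-only flag (bit 0) was set" behaviour and the fixed
 * v1.0 descriptor size mirror the code above.
 */
static size_t ffa_part_desc_size(u32 version, const struct arm_smccc_res *res)
{
	if (version > FFA_VERSION_1_0)
		return res->a3;			/* reported by the SPMD */

	return FFA_1_0_PARTITON_INFO_SZ;	/* fixed-size v1.0 entries */
}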
|
||||
|
||||
bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
|
||||
{
|
||||
struct arm_smccc_res res;
|
||||
|
||||
/*
|
||||
* There's no way we can tell what a non-standard SMC call might
|
||||
* be up to. Ideally, we would terminate these here and return
|
||||
* an error to the host, but sadly devices make use of custom
|
||||
* firmware calls for things like power management, debugging,
|
||||
* RNG access and crash reporting.
|
||||
*
|
||||
* Given that the architecture requires us to trust EL3 anyway,
|
||||
* we forward unrecognised calls on under the assumption that
|
||||
* the firmware doesn't expose a mechanism to access arbitrary
|
||||
* non-secure memory. Short of a per-device table of SMCs, this
|
||||
* is the best we can do.
|
||||
*/
|
||||
if (!is_ffa_call(func_id))
|
||||
return false;
|
||||
|
||||
if (!has_version_negotiated && func_id != FFA_VERSION) {
|
||||
ffa_to_smccc_error(&res, FFA_RET_INVALID_PARAMETERS);
|
||||
goto out_handled;
|
||||
}
|
||||
|
||||
switch (func_id) {
|
||||
case FFA_FEATURES:
|
||||
if (!do_ffa_features(&res, host_ctxt))
|
||||
return false;
|
||||
goto out_handled;
|
||||
/* Memory management */
|
||||
case FFA_FN64_RXTX_MAP:
|
||||
do_ffa_rxtx_map(&res, host_ctxt);
|
||||
goto out_handled;
|
||||
case FFA_RXTX_UNMAP:
|
||||
do_ffa_rxtx_unmap(&res, host_ctxt);
|
||||
goto out_handled;
|
||||
case FFA_MEM_SHARE:
|
||||
case FFA_FN64_MEM_SHARE:
|
||||
do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);
|
||||
goto out_handled;
|
||||
case FFA_MEM_RECLAIM:
|
||||
do_ffa_mem_reclaim(&res, host_ctxt);
|
||||
goto out_handled;
|
||||
case FFA_MEM_LEND:
|
||||
case FFA_FN64_MEM_LEND:
|
||||
do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt);
|
||||
goto out_handled;
|
||||
case FFA_MEM_FRAG_TX:
|
||||
do_ffa_mem_frag_tx(&res, host_ctxt);
|
||||
goto out_handled;
|
||||
case FFA_VERSION:
|
||||
do_ffa_version(&res, host_ctxt);
|
||||
goto out_handled;
|
||||
case FFA_PARTITION_INFO_GET:
|
||||
do_ffa_part_get(&res, host_ctxt);
|
||||
goto out_handled;
|
||||
}
|
||||
|
||||
if (ffa_call_supported(func_id))
|
||||
return false; /* Pass through */
|
||||
|
||||
ffa_to_smccc_error(&res, FFA_RET_NOT_SUPPORTED);
|
||||
out_handled:
|
||||
ffa_set_retval(host_ctxt, &res);
|
||||
return true;
|
||||
}
|
||||
|
||||
int hyp_ffa_init(void *pages)
|
||||
{
|
||||
struct arm_smccc_res res;
|
||||
void *tx, *rx;
|
||||
|
||||
if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_2)
|
||||
return 0;
|
||||
|
||||
arm_smccc_1_1_smc(FFA_VERSION, FFA_VERSION_1_1, 0, 0, 0, 0, 0, 0, &res);
|
||||
if (res.a0 == FFA_RET_NOT_SUPPORTED)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Firmware returns the maximum supported version of the FF-A
|
||||
* implementation. Check that the returned version is
|
||||
* backwards-compatible with the hyp according to the rules in DEN0077A
|
||||
* v1.1 REL0 13.2.1.
|
||||
*
|
||||
* Of course, things are never simple when dealing with firmware. v1.1
|
||||
* broke ABI with v1.0 on several structures, which is itself
|
||||
* incompatible with the aforementioned versioning scheme. The
|
||||
* expectation is that v1.x implementations that do not support the v1.0
|
||||
* ABI return NOT_SUPPORTED rather than a version number, according to
|
||||
* DEN0077A v1.1 REL0 18.6.4.
|
||||
*/
|
||||
if (FFA_MAJOR_VERSION(res.a0) != 1)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (FFA_MINOR_VERSION(res.a0) < FFA_MINOR_VERSION(FFA_VERSION_1_1))
|
||||
hyp_ffa_version = res.a0;
|
||||
else
|
||||
hyp_ffa_version = FFA_VERSION_1_1;
|
||||
|
||||
tx = pages;
|
||||
pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
|
||||
rx = pages;
|
||||
@@ -787,5 +902,6 @@ int hyp_ffa_init(void *pages)
|
||||
.lock = __HYP_SPIN_LOCK_UNLOCKED,
|
||||
};
|
||||
|
||||
version_lock = __HYP_SPIN_LOCK_UNLOCKED;
|
||||
return 0;
|
||||
}
|
||||
|
@@ -50,6 +50,9 @@
|
||||
#ifndef R_AARCH64_ABS64
|
||||
#define R_AARCH64_ABS64 257
|
||||
#endif
|
||||
#ifndef R_AARCH64_ABS32
|
||||
#define R_AARCH64_ABS32 258
|
||||
#endif
|
||||
#ifndef R_AARCH64_PREL64
|
||||
#define R_AARCH64_PREL64 260
|
||||
#endif
|
||||
@@ -383,6 +386,9 @@ static void emit_rela_section(Elf64_Shdr *sh_rela)
|
||||
case R_AARCH64_ABS64:
|
||||
emit_rela_abs64(rela, sh_orig_name);
|
||||
break;
|
||||
/* Allow 32-bit absolute relocation, for kCFI type hashes. */
|
||||
case R_AARCH64_ABS32:
|
||||
break;
|
||||
/* Allow position-relative data relocations. */
|
||||
case R_AARCH64_PREL64:
|
||||
case R_AARCH64_PREL32:
|
||||
|
@@ -197,12 +197,6 @@ SYM_FUNC_END(__host_hvc)
|
||||
sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0
|
||||
sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp
|
||||
|
||||
/* If a guest is loaded, panic out of it. */
|
||||
stp x0, x1, [sp, #-16]!
|
||||
get_loaded_vcpu x0, x1
|
||||
cbnz x0, __guest_exit_panic
|
||||
add sp, sp, #16
|
||||
|
||||
/*
|
||||
* The panic may not be clean if the exception is taken before the host
|
||||
* context has been saved by __host_exit or after the hyp context has
|
||||
|
@@ -5,6 +5,7 @@
|
||||
*/
|
||||
|
||||
#include <linux/arm-smccc.h>
|
||||
#include <linux/cfi_types.h>
|
||||
#include <linux/linkage.h>
|
||||
|
||||
#include <asm/alternative.h>
|
||||
@@ -265,33 +266,38 @@ alternative_else_nop_endif
|
||||
|
||||
SYM_CODE_END(__kvm_handle_stub_hvc)
|
||||
|
||||
SYM_FUNC_START(__pkvm_init_switch_pgd)
|
||||
/*
|
||||
* void __pkvm_init_switch_pgd(phys_addr_t pgd, unsigned long sp,
|
||||
* void (*fn)(void));
|
||||
*
|
||||
* SYM_TYPED_FUNC_START() allows C to call this ID-mapped function indirectly
|
||||
* using a physical pointer without triggering a kCFI failure.
|
||||
*/
|
||||
SYM_TYPED_FUNC_START(__pkvm_init_switch_pgd)
|
||||
/* Turn the MMU off */
|
||||
pre_disable_mmu_workaround
|
||||
mrs x2, sctlr_el2
|
||||
bic x3, x2, #SCTLR_ELx_M
|
||||
msr sctlr_el2, x3
|
||||
mrs x3, sctlr_el2
|
||||
bic x4, x3, #SCTLR_ELx_M
|
||||
msr sctlr_el2, x4
|
||||
isb
|
||||
|
||||
tlbi alle2
|
||||
|
||||
/* Install the new pgtables */
|
||||
ldr x3, [x0, #NVHE_INIT_PGD_PA]
|
||||
phys_to_ttbr x4, x3
|
||||
phys_to_ttbr x5, x0
|
||||
alternative_if ARM64_HAS_CNP
|
||||
orr x4, x4, #TTBR_CNP_BIT
|
||||
orr x5, x5, #TTBR_CNP_BIT
|
||||
alternative_else_nop_endif
|
||||
msr ttbr0_el2, x4
|
||||
msr ttbr0_el2, x5
|
||||
|
||||
/* Set the new stack pointer */
|
||||
ldr x0, [x0, #NVHE_INIT_STACK_HYP_VA]
|
||||
mov sp, x0
|
||||
mov sp, x1
|
||||
|
||||
/* And turn the MMU back on! */
|
||||
dsb nsh
|
||||
isb
|
||||
set_sctlr_el2 x2
|
||||
ret x1
|
||||
set_sctlr_el2 x3
|
||||
ret x2
|
||||
SYM_FUNC_END(__pkvm_init_switch_pgd)
|
||||
|
||||
.popsection
|
||||
|
@@ -339,7 +339,7 @@ int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
|
||||
{
|
||||
struct kvm_nvhe_init_params *params;
|
||||
void *virt = hyp_phys_to_virt(phys);
|
||||
void (*fn)(phys_addr_t params_pa, void *finalize_fn_va);
|
||||
typeof(__pkvm_init_switch_pgd) *fn;
|
||||
int ret;
|
||||
|
||||
BUG_ON(kvm_check_pvm_sysreg_table());
|
||||
@@ -363,7 +363,7 @@ int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
|
||||
/* Jump in the idmap page to switch to the new page-tables */
|
||||
params = this_cpu_ptr(&kvm_init_params);
|
||||
fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
|
||||
fn(__hyp_pa(params), __pkvm_init_finalise);
|
||||
fn(params->pgd_pa, params->stack_hyp_va, __pkvm_init_finalise);
|
||||
|
||||
unreachable();
|
||||
}
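/*
 * Editor's illustrative sketch, not part of this commit: the shape of a
 * kCFI-checked indirect call, with made-up names. The compiler records a
 * hash of the function type next to every address-taken C function and
 * verifies it before an indirect call; declaring fn above as
 * typeof(__pkvm_init_switch_pgd) * gives the call site the right type to
 * hash, and SYM_TYPED_FUNC_START() emits the matching hash for the
 * assembly callee.
 */
typedef void (*init_fn_t)(unsigned long pgd_pa, unsigned long sp,
			  void (*finalize)(void));

static void demo_indirect_call(unsigned long target_pa)
{
	init_fn_t fn = (init_fn_t)target_pa;

	/*
	 * With kCFI the compiler inserts, roughly: load the 32-bit hash
	 * stored just before fn's entry point, compare it against the
	 * hash of init_fn_t, and trap on mismatch before branching.
	 */
	fn(0, 0, NULL);
}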
|
||||
|
@@ -65,6 +65,77 @@ static u64 __compute_hcr(struct kvm_vcpu *vcpu)
|
||||
return hcr | (__vcpu_sys_reg(vcpu, HCR_EL2) & ~NV_HCR_GUEST_EXCLUDE);
|
||||
}
|
||||
|
||||
static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u64 cptr;
|
||||
|
||||
/*
|
||||
* With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
|
||||
* CPTR_EL2. In general, CPACR_EL1 has the same layout as CPTR_EL2,
|
||||
* except for some missing controls, such as TAM.
|
||||
* In this case, CPTR_EL2.TAM has the same position with or without
|
||||
* VHE (HCR.E2H == 1) which allows us to use here the CPTR_EL2.TAM
|
||||
* shift value for trapping the AMU accesses.
|
||||
*/
|
||||
u64 val = CPACR_ELx_TTA | CPTR_EL2_TAM;
|
||||
|
||||
if (guest_owns_fp_regs()) {
|
||||
val |= CPACR_ELx_FPEN;
|
||||
if (vcpu_has_sve(vcpu))
|
||||
val |= CPACR_ELx_ZEN;
|
||||
} else {
|
||||
__activate_traps_fpsimd32(vcpu);
|
||||
}
|
||||
|
||||
if (!vcpu_has_nv(vcpu))
|
||||
goto write;
|
||||
|
||||
/*
|
||||
* The architecture is a bit crap (what a surprise): an EL2 guest
|
||||
* writing to CPTR_EL2 via CPACR_EL1 can't set any of TCPAC or TTA,
|
||||
* as they are RES0 in the guest's view. To work around it, trap the
|
||||
* sucker using the very same bit it can't set...
|
||||
*/
|
||||
if (vcpu_el2_e2h_is_set(vcpu) && is_hyp_ctxt(vcpu))
|
||||
val |= CPTR_EL2_TCPAC;
|
||||
|
||||
/*
|
||||
* Layer the guest hypervisor's trap configuration on top of our own if
|
||||
* we're in a nested context.
|
||||
*/
|
||||
if (is_hyp_ctxt(vcpu))
|
||||
goto write;
|
||||
|
||||
cptr = vcpu_sanitised_cptr_el2(vcpu);
|
||||
|
||||
/*
|
||||
* Pay attention, there's some interesting detail here.
|
||||
*
|
||||
* The CPTR_EL2.xEN fields are 2 bits wide, although there are only two
|
||||
* meaningful trap states when HCR_EL2.TGE = 0 (running a nested guest):
|
||||
*
|
||||
* - CPTR_EL2.xEN = x0, traps are enabled
|
||||
* - CPTR_EL2.xEN = x1, traps are disabled
|
||||
*
|
||||
* In other words, bit[0] determines if guest accesses trap or not. In
|
||||
* the interest of simplicity, clear the entire field if the guest
|
||||
* hypervisor has traps enabled to dispel any illusion of something more
|
||||
* complicated taking place.
|
||||
*/
|
||||
if (!(SYS_FIELD_GET(CPACR_ELx, FPEN, cptr) & BIT(0)))
|
||||
val &= ~CPACR_ELx_FPEN;
|
||||
if (!(SYS_FIELD_GET(CPACR_ELx, ZEN, cptr) & BIT(0)))
|
||||
val &= ~CPACR_ELx_ZEN;
|
||||
|
||||
if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, S2POE, IMP))
|
||||
val |= cptr & CPACR_ELx_E0POE;
|
||||
|
||||
val |= cptr & CPTR_EL2_TCPAC;
|
||||
|
||||
write:
|
||||
write_sysreg(val, cpacr_el1);
|
||||
}
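/*
 * Editor's illustrative sketch, not part of this commit: the "only
 * bit[0] matters" layering above, pulled out into a helper. If the guest
 * hypervisor left the low bit of FPEN/ZEN clear, its own guest must
 * trap, so the corresponding enable bits are stripped from the host
 * value.
 */
static u64 layer_guest_fp_traps(u64 host_val, u64 guest_cptr)
{
	if (!(SYS_FIELD_GET(CPACR_ELx, FPEN, guest_cptr) & BIT(0)))
		host_val &= ~CPACR_ELx_FPEN;
	if (!(SYS_FIELD_GET(CPACR_ELx, ZEN, guest_cptr) & BIT(0)))
		host_val &= ~CPACR_ELx_ZEN;

	return host_val;
}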
|
||||
|
||||
static void __activate_traps(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u64 val;
|
||||
@@ -91,30 +162,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
|
||||
}
|
||||
}
|
||||
|
||||
val = read_sysreg(cpacr_el1);
|
||||
val |= CPACR_ELx_TTA;
|
||||
val &= ~(CPACR_ELx_ZEN | CPACR_ELx_SMEN);
|
||||
|
||||
/*
|
||||
* With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
|
||||
* CPTR_EL2. In general, CPACR_EL1 has the same layout as CPTR_EL2,
|
||||
* except for some missing controls, such as TAM.
|
||||
* In this case, CPTR_EL2.TAM has the same position with or without
|
||||
* VHE (HCR.E2H == 1) which allows us to use here the CPTR_EL2.TAM
|
||||
* shift value for trapping the AMU accesses.
|
||||
*/
|
||||
|
||||
val |= CPTR_EL2_TAM;
|
||||
|
||||
if (guest_owns_fp_regs()) {
|
||||
if (vcpu_has_sve(vcpu))
|
||||
val |= CPACR_ELx_ZEN;
|
||||
} else {
|
||||
val &= ~CPACR_ELx_FPEN;
|
||||
__activate_traps_fpsimd32(vcpu);
|
||||
}
|
||||
|
||||
write_sysreg(val, cpacr_el1);
|
||||
__activate_cptr_traps(vcpu);
|
||||
|
||||
write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el1);
|
||||
}
|
||||
@@ -266,10 +314,111 @@ static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
|
||||
__fpsimd_save_state(*host_data_ptr(fpsimd_state));
|
||||
}
|
||||
|
||||
static bool kvm_hyp_handle_tlbi_el2(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
{
|
||||
int ret = -EINVAL;
|
||||
u32 instr;
|
||||
u64 val;
|
||||
|
||||
/*
|
||||
* Ideally, we would never trap on EL2 S1 TLB invalidations using
|
||||
* the EL1 instructions when the guest's HCR_EL2.{E2H,TGE}=={1,1}.
|
||||
* But "thanks" to FEAT_NV2, we don't trap writes to HCR_EL2,
|
||||
* meaning that we can't track changes to the virtual TGE bit. So we
|
||||
* have to leave HCR_EL2.TTLB set on the host. Oopsie...
|
||||
*
|
||||
* Try and handle these invalidation as quickly as possible, without
|
||||
* fully exiting. Note that we don't need to consider any forwarding
|
||||
* here, as having E2H+TGE set is the very definition of being
|
||||
* InHost.
|
||||
*
|
||||
* For the lesser hypervisors out there that have failed to get on
|
||||
* with the VHE program, we can also handle the nVHE style of EL2
|
||||
* invalidation.
|
||||
*/
|
||||
if (!(is_hyp_ctxt(vcpu)))
|
||||
return false;
|
||||
|
||||
instr = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
|
||||
val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));
|
||||
|
||||
if ((kvm_supported_tlbi_s1e1_op(vcpu, instr) &&
|
||||
vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)) ||
|
||||
kvm_supported_tlbi_s1e2_op (vcpu, instr))
|
||||
ret = __kvm_tlbi_s1e2(NULL, val, instr);
|
||||
|
||||
if (ret)
|
||||
return false;
|
||||
|
||||
__kvm_skip_instr(vcpu);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool kvm_hyp_handle_cpacr_el1(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
{
|
||||
u64 esr = kvm_vcpu_get_esr(vcpu);
|
||||
int rt;
|
||||
|
||||
if (!is_hyp_ctxt(vcpu) || esr_sys64_to_sysreg(esr) != SYS_CPACR_EL1)
|
||||
return false;
|
||||
|
||||
rt = kvm_vcpu_sys_get_rt(vcpu);
|
||||
|
||||
if ((esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ) {
|
||||
vcpu_set_reg(vcpu, rt, __vcpu_sys_reg(vcpu, CPTR_EL2));
|
||||
} else {
|
||||
vcpu_write_sys_reg(vcpu, vcpu_get_reg(vcpu, rt), CPTR_EL2);
|
||||
__activate_cptr_traps(vcpu);
|
||||
}
|
||||
|
||||
__kvm_skip_instr(vcpu);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool kvm_hyp_handle_zcr_el2(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
{
|
||||
u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
|
||||
|
||||
if (!vcpu_has_nv(vcpu))
|
||||
return false;
|
||||
|
||||
if (sysreg != SYS_ZCR_EL2)
|
||||
return false;
|
||||
|
||||
if (guest_owns_fp_regs())
|
||||
return false;
|
||||
|
||||
/*
|
||||
* ZCR_EL2 traps are handled in the slow path, with the expectation
|
||||
* that the guest's FP context has already been loaded onto the CPU.
|
||||
*
|
||||
* Load the guest's FP context and unconditionally forward to the
|
||||
* slow path for handling (i.e. return false).
|
||||
*/
|
||||
kvm_hyp_handle_fpsimd(vcpu, exit_code);
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool kvm_hyp_handle_sysreg_vhe(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
{
|
||||
if (kvm_hyp_handle_tlbi_el2(vcpu, exit_code))
|
||||
return true;
|
||||
|
||||
if (kvm_hyp_handle_cpacr_el1(vcpu, exit_code))
|
||||
return true;
|
||||
|
||||
if (kvm_hyp_handle_zcr_el2(vcpu, exit_code))
|
||||
return true;
|
||||
|
||||
return kvm_hyp_handle_sysreg(vcpu, exit_code);
|
||||
}
|
||||
|
||||
static const exit_handler_fn hyp_exit_handlers[] = {
|
||||
[0 ... ESR_ELx_EC_MAX] = NULL,
|
||||
[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,
|
||||
[ESR_ELx_EC_SYS64] = kvm_hyp_handle_sysreg,
|
||||
[ESR_ELx_EC_SYS64] = kvm_hyp_handle_sysreg_vhe,
|
||||
[ESR_ELx_EC_SVE] = kvm_hyp_handle_fpsimd,
|
||||
[ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
|
||||
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
|
||||
@@ -388,7 +537,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __hyp_call_panic(u64 spsr, u64 elr, u64 par)
|
||||
static void __noreturn __hyp_call_panic(u64 spsr, u64 elr, u64 par)
|
||||
{
|
||||
struct kvm_cpu_context *host_ctxt;
|
||||
struct kvm_vcpu *vcpu;
|
||||
@@ -413,7 +562,6 @@ void __noreturn hyp_panic(void)
|
||||
u64 par = read_sysreg_par();
|
||||
|
||||
__hyp_call_panic(spsr, elr, par);
|
||||
unreachable();
|
||||
}
|
||||
|
||||
asmlinkage void kvm_unexpected_el2_exception(void)
|
||||
|
@@ -219,3 +219,150 @@ void __kvm_flush_vm_context(void)
|
||||
__tlbi(alle1is);
|
||||
dsb(ish);
|
||||
}
|
||||
|
||||
/*
|
||||
* TLB invalidation emulation for NV. For any given instruction, we
|
||||
* perform the following transformations:
|
||||
*
|
||||
* - a TLBI targeting EL2 S1 is remapped to EL1 S1
|
||||
* - a non-shareable TLBI is upgraded to being inner-shareable
|
||||
* - an outer-shareable TLBI is also mapped to inner-shareable
|
||||
* - an nXS TLBI is upgraded to XS
|
||||
*/
|
||||
int __kvm_tlbi_s1e2(struct kvm_s2_mmu *mmu, u64 va, u64 sys_encoding)
|
||||
{
|
||||
struct tlb_inv_context cxt;
|
||||
int ret = 0;
|
||||
|
||||
/*
|
||||
* The guest will have provided its own DSB ISHST before trapping.
|
||||
* If it hasn't, that's its own problem, and we won't paper over it
|
||||
* (plus, there is plenty of extra synchronisation before we even
|
||||
* get here...).
|
||||
*/
|
||||
|
||||
if (mmu)
|
||||
enter_vmid_context(mmu, &cxt);
|
||||
|
||||
switch (sys_encoding) {
|
||||
case OP_TLBI_ALLE2:
|
||||
case OP_TLBI_ALLE2IS:
|
||||
case OP_TLBI_ALLE2OS:
|
||||
case OP_TLBI_VMALLE1:
|
||||
case OP_TLBI_VMALLE1IS:
|
||||
case OP_TLBI_VMALLE1OS:
|
||||
case OP_TLBI_ALLE2NXS:
|
||||
case OP_TLBI_ALLE2ISNXS:
|
||||
case OP_TLBI_ALLE2OSNXS:
|
||||
case OP_TLBI_VMALLE1NXS:
|
||||
case OP_TLBI_VMALLE1ISNXS:
|
||||
case OP_TLBI_VMALLE1OSNXS:
|
||||
__tlbi(vmalle1is);
|
||||
break;
|
||||
case OP_TLBI_VAE2:
|
||||
case OP_TLBI_VAE2IS:
|
||||
case OP_TLBI_VAE2OS:
|
||||
case OP_TLBI_VAE1:
|
||||
case OP_TLBI_VAE1IS:
|
||||
case OP_TLBI_VAE1OS:
|
||||
case OP_TLBI_VAE2NXS:
|
||||
case OP_TLBI_VAE2ISNXS:
|
||||
case OP_TLBI_VAE2OSNXS:
|
||||
case OP_TLBI_VAE1NXS:
|
||||
case OP_TLBI_VAE1ISNXS:
|
||||
case OP_TLBI_VAE1OSNXS:
|
||||
__tlbi(vae1is, va);
|
||||
break;
|
||||
case OP_TLBI_VALE2:
|
||||
case OP_TLBI_VALE2IS:
|
||||
case OP_TLBI_VALE2OS:
|
||||
case OP_TLBI_VALE1:
|
||||
case OP_TLBI_VALE1IS:
|
||||
case OP_TLBI_VALE1OS:
|
||||
case OP_TLBI_VALE2NXS:
|
||||
case OP_TLBI_VALE2ISNXS:
|
||||
case OP_TLBI_VALE2OSNXS:
|
||||
case OP_TLBI_VALE1NXS:
|
||||
case OP_TLBI_VALE1ISNXS:
|
||||
case OP_TLBI_VALE1OSNXS:
|
||||
__tlbi(vale1is, va);
|
||||
break;
|
||||
case OP_TLBI_ASIDE1:
|
||||
case OP_TLBI_ASIDE1IS:
|
||||
case OP_TLBI_ASIDE1OS:
|
||||
case OP_TLBI_ASIDE1NXS:
|
||||
case OP_TLBI_ASIDE1ISNXS:
|
||||
case OP_TLBI_ASIDE1OSNXS:
|
||||
__tlbi(aside1is, va);
|
||||
break;
|
||||
case OP_TLBI_VAAE1:
|
||||
case OP_TLBI_VAAE1IS:
|
||||
case OP_TLBI_VAAE1OS:
|
||||
case OP_TLBI_VAAE1NXS:
|
||||
case OP_TLBI_VAAE1ISNXS:
|
||||
case OP_TLBI_VAAE1OSNXS:
|
||||
__tlbi(vaae1is, va);
|
||||
break;
|
||||
case OP_TLBI_VAALE1:
|
||||
case OP_TLBI_VAALE1IS:
|
||||
case OP_TLBI_VAALE1OS:
|
||||
case OP_TLBI_VAALE1NXS:
|
||||
case OP_TLBI_VAALE1ISNXS:
|
||||
case OP_TLBI_VAALE1OSNXS:
|
||||
__tlbi(vaale1is, va);
|
||||
break;
|
||||
case OP_TLBI_RVAE2:
|
||||
case OP_TLBI_RVAE2IS:
|
||||
case OP_TLBI_RVAE2OS:
|
||||
case OP_TLBI_RVAE1:
|
||||
case OP_TLBI_RVAE1IS:
|
||||
case OP_TLBI_RVAE1OS:
|
||||
case OP_TLBI_RVAE2NXS:
|
||||
case OP_TLBI_RVAE2ISNXS:
|
||||
case OP_TLBI_RVAE2OSNXS:
|
||||
case OP_TLBI_RVAE1NXS:
|
||||
case OP_TLBI_RVAE1ISNXS:
|
||||
case OP_TLBI_RVAE1OSNXS:
|
||||
__tlbi(rvae1is, va);
|
||||
break;
|
||||
case OP_TLBI_RVALE2:
|
||||
case OP_TLBI_RVALE2IS:
|
||||
case OP_TLBI_RVALE2OS:
|
||||
case OP_TLBI_RVALE1:
|
||||
case OP_TLBI_RVALE1IS:
|
||||
case OP_TLBI_RVALE1OS:
|
||||
case OP_TLBI_RVALE2NXS:
|
||||
case OP_TLBI_RVALE2ISNXS:
|
||||
case OP_TLBI_RVALE2OSNXS:
|
||||
case OP_TLBI_RVALE1NXS:
|
||||
case OP_TLBI_RVALE1ISNXS:
|
||||
case OP_TLBI_RVALE1OSNXS:
|
||||
__tlbi(rvale1is, va);
|
||||
break;
|
||||
case OP_TLBI_RVAAE1:
|
||||
case OP_TLBI_RVAAE1IS:
|
||||
case OP_TLBI_RVAAE1OS:
|
||||
case OP_TLBI_RVAAE1NXS:
|
||||
case OP_TLBI_RVAAE1ISNXS:
|
||||
case OP_TLBI_RVAAE1OSNXS:
|
||||
__tlbi(rvaae1is, va);
|
||||
break;
|
||||
case OP_TLBI_RVAALE1:
|
||||
case OP_TLBI_RVAALE1IS:
|
||||
case OP_TLBI_RVAALE1OS:
|
||||
case OP_TLBI_RVAALE1NXS:
|
||||
case OP_TLBI_RVAALE1ISNXS:
|
||||
case OP_TLBI_RVAALE1OSNXS:
|
||||
__tlbi(rvaale1is, va);
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
}
|
||||
dsb(ish);
|
||||
isb();
|
||||
|
||||
if (mmu)
|
||||
exit_vmid_context(&cxt);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@@ -328,18 +328,23 @@ static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64
|
||||
may_block));
|
||||
}
|
||||
|
||||
static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
|
||||
void kvm_stage2_unmap_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
|
||||
{
|
||||
__unmap_stage2_range(mmu, start, size, true);
|
||||
}
|
||||
|
||||
void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
|
||||
{
|
||||
stage2_apply_range_resched(mmu, addr, end, kvm_pgtable_stage2_flush);
|
||||
}
|
||||
|
||||
static void stage2_flush_memslot(struct kvm *kvm,
|
||||
struct kvm_memory_slot *memslot)
|
||||
{
|
||||
phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
|
||||
phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
|
||||
|
||||
stage2_apply_range_resched(&kvm->arch.mmu, addr, end, kvm_pgtable_stage2_flush);
|
||||
kvm_stage2_flush_range(&kvm->arch.mmu, addr, end);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -362,6 +367,8 @@ static void stage2_flush_vm(struct kvm *kvm)
|
||||
kvm_for_each_memslot(memslot, bkt, slots)
|
||||
stage2_flush_memslot(kvm, memslot);
|
||||
|
||||
kvm_nested_s2_flush(kvm);
|
||||
|
||||
write_unlock(&kvm->mmu_lock);
|
||||
srcu_read_unlock(&kvm->srcu, idx);
|
||||
}
|
||||
@@ -855,21 +862,9 @@ static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
|
||||
.icache_inval_pou = invalidate_icache_guest_page,
|
||||
};
|
||||
|
||||
/**
|
||||
* kvm_init_stage2_mmu - Initialise a S2 MMU structure
|
||||
* @kvm: The pointer to the KVM structure
|
||||
* @mmu: The pointer to the s2 MMU structure
|
||||
* @type: The machine type of the virtual machine
|
||||
*
|
||||
* Allocates only the stage-2 HW PGD level table(s).
|
||||
* Note we don't need locking here as this is only called when the VM is
|
||||
* created, which can only be done once.
|
||||
*/
|
||||
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type)
|
||||
static int kvm_init_ipa_range(struct kvm_s2_mmu *mmu, unsigned long type)
|
||||
{
|
||||
u32 kvm_ipa_limit = get_kvm_ipa_limit();
|
||||
int cpu, err;
|
||||
struct kvm_pgtable *pgt;
|
||||
u64 mmfr0, mmfr1;
|
||||
u32 phys_shift;
|
||||
|
||||
@@ -896,11 +891,51 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t
|
||||
mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
|
||||
mmu->vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_init_stage2_mmu - Initialise a S2 MMU structure
|
||||
* @kvm: The pointer to the KVM structure
|
||||
* @mmu: The pointer to the s2 MMU structure
|
||||
* @type: The machine type of the virtual machine
|
||||
*
|
||||
* Allocates only the stage-2 HW PGD level table(s).
|
||||
* Note we don't need locking here as this is only called in two cases:
|
||||
*
|
||||
* - when the VM is created, which can't race against anything
|
||||
*
|
||||
* - when secondary kvm_s2_mmu structures are initialised for NV
|
||||
* guests, and the caller must hold kvm->lock as this is called on a
|
||||
* per-vcpu basis.
|
||||
*/
|
||||
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type)
|
||||
{
|
||||
int cpu, err;
|
||||
struct kvm_pgtable *pgt;
|
||||
|
||||
/*
|
||||
* If we already have our page tables in place, and that the
|
||||
* MMU context is the canonical one, we have a bug somewhere,
|
||||
* as this is only supposed to ever happen once per VM.
|
||||
*
|
||||
* Otherwise, we're building nested page tables, and that's
|
||||
* probably because userspace called KVM_ARM_VCPU_INIT more
|
||||
* than once on the same vcpu. Since that's actually legal,
|
||||
* don't kick a fuss and leave gracefully.
|
||||
*/
|
||||
if (mmu->pgt != NULL) {
|
||||
if (kvm_is_nested_s2_mmu(kvm, mmu))
|
||||
return 0;
|
||||
|
||||
kvm_err("kvm_arch already initialized?\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
err = kvm_init_ipa_range(mmu, type);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
pgt = kzalloc(sizeof(*pgt), GFP_KERNEL_ACCOUNT);
|
||||
if (!pgt)
|
||||
return -ENOMEM;
|
||||
@@ -925,6 +960,10 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t
|
||||
|
||||
mmu->pgt = pgt;
|
||||
mmu->pgd_phys = __pa(pgt->pgd);
|
||||
|
||||
if (kvm_is_nested_s2_mmu(kvm, mmu))
|
||||
kvm_init_nested_s2_mmu(mmu);
|
||||
|
||||
return 0;
|
||||
|
||||
out_destroy_pgtable:
|
||||
@@ -976,7 +1015,7 @@ static void stage2_unmap_memslot(struct kvm *kvm,
|
||||
|
||||
if (!(vma->vm_flags & VM_PFNMAP)) {
|
||||
gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
|
||||
unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
|
||||
kvm_stage2_unmap_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
|
||||
}
|
||||
hva = vm_end;
|
||||
} while (hva < reg_end);
|
||||
@@ -1003,6 +1042,8 @@ void stage2_unmap_vm(struct kvm *kvm)
|
||||
kvm_for_each_memslot(memslot, bkt, slots)
|
||||
stage2_unmap_memslot(kvm, memslot);
|
||||
|
||||
kvm_nested_s2_unmap(kvm);
|
||||
|
||||
write_unlock(&kvm->mmu_lock);
|
||||
mmap_read_unlock(current->mm);
|
||||
srcu_read_unlock(&kvm->srcu, idx);
|
||||
@@ -1102,12 +1143,12 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
|
||||
}
|
||||
|
||||
/**
|
||||
* stage2_wp_range() - write protect stage2 memory region range
|
||||
* kvm_stage2_wp_range() - write protect stage2 memory region range
|
||||
* @mmu: The KVM stage-2 MMU pointer
|
||||
* @addr: Start address of range
|
||||
* @end: End address of range
|
||||
*/
|
||||
static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
|
||||
void kvm_stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
|
||||
{
|
||||
stage2_apply_range_resched(mmu, addr, end, kvm_pgtable_stage2_wrprotect);
|
||||
}
|
||||
@@ -1138,7 +1179,8 @@ static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
|
||||
end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
|
||||
|
||||
write_lock(&kvm->mmu_lock);
|
||||
stage2_wp_range(&kvm->arch.mmu, start, end);
|
||||
kvm_stage2_wp_range(&kvm->arch.mmu, start, end);
|
||||
kvm_nested_s2_wp(kvm);
|
||||
write_unlock(&kvm->mmu_lock);
|
||||
kvm_flush_remote_tlbs_memslot(kvm, memslot);
|
||||
}
|
||||
@@ -1192,7 +1234,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
|
||||
|
||||
lockdep_assert_held_write(&kvm->mmu_lock);
|
||||
|
||||
stage2_wp_range(&kvm->arch.mmu, start, end);
|
||||
kvm_stage2_wp_range(&kvm->arch.mmu, start, end);
|
||||
|
||||
/*
|
||||
* Eager-splitting is done when manual-protect is set. We
|
||||
@@ -1204,6 +1246,8 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
|
||||
*/
|
||||
if (kvm_dirty_log_manual_protect_and_init_set(kvm))
|
||||
kvm_mmu_split_huge_pages(kvm, start, end);
|
||||
|
||||
kvm_nested_s2_wp(kvm);
|
||||
}
|
||||
|
||||
static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
|
||||
@@ -1375,6 +1419,7 @@ static bool kvm_vma_mte_allowed(struct vm_area_struct *vma)
|
||||
}
|
||||
|
||||
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||
struct kvm_s2_trans *nested,
|
||||
struct kvm_memory_slot *memslot, unsigned long hva,
|
||||
bool fault_is_perm)
|
||||
{
|
||||
@@ -1383,6 +1428,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||
bool exec_fault, mte_allowed;
|
||||
bool device = false, vfio_allow_any_uc = false;
|
||||
unsigned long mmu_seq;
|
||||
phys_addr_t ipa = fault_ipa;
|
||||
struct kvm *kvm = vcpu->kvm;
|
||||
struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
|
||||
struct vm_area_struct *vma;
|
||||
@@ -1466,10 +1512,38 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||
}
|
||||
|
||||
vma_pagesize = 1UL << vma_shift;
|
||||
|
||||
if (nested) {
|
||||
unsigned long max_map_size;
|
||||
|
||||
max_map_size = force_pte ? PAGE_SIZE : PUD_SIZE;
|
||||
|
||||
ipa = kvm_s2_trans_output(nested);
|
||||
|
||||
/*
|
||||
* If we're about to create a shadow stage 2 entry, then we
|
||||
* can only create a block mapping if the guest stage 2 page
|
||||
* table uses at least as big a mapping.
|
||||
*/
|
||||
max_map_size = min(kvm_s2_trans_size(nested), max_map_size);
|
||||
|
||||
/*
|
||||
* Be careful that if the mapping size falls between
|
||||
* two host sizes, take the smallest of the two.
|
||||
*/
|
||||
if (max_map_size >= PMD_SIZE && max_map_size < PUD_SIZE)
|
||||
max_map_size = PMD_SIZE;
|
||||
else if (max_map_size >= PAGE_SIZE && max_map_size < PMD_SIZE)
|
||||
max_map_size = PAGE_SIZE;
|
||||
|
||||
force_pte = (max_map_size == PAGE_SIZE);
|
||||
vma_pagesize = min(vma_pagesize, (long)max_map_size);
|
||||
}
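/*
 * Editor's note, not part of this commit: a worked example of the
 * clamping above. If the guest's own stage-2 maps this IPA with a 2MiB
 * block while the host VMA could back it with a 1GiB THP, max_map_size
 * becomes min(2MiB, PUD_SIZE) = 2MiB, so the shadow entry is at most a
 * PMD-level block. If the guest instead used a 64KiB mapping (between
 * PAGE_SIZE and PMD_SIZE on a 4K host), it is rounded down to PAGE_SIZE
 * and force_pte ends up set.
 */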
|
||||
|
||||
if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
|
||||
fault_ipa &= ~(vma_pagesize - 1);
|
||||
|
||||
gfn = fault_ipa >> PAGE_SHIFT;
|
||||
gfn = ipa >> PAGE_SHIFT;
|
||||
mte_allowed = kvm_vma_mte_allowed(vma);
|
||||
|
||||
vfio_allow_any_uc = vma->vm_flags & VM_ALLOW_ANY_UNCACHED;
|
||||
@@ -1520,6 +1594,25 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||
if (exec_fault && device)
|
||||
return -ENOEXEC;
|
||||
|
||||
/*
|
||||
* Potentially reduce shadow S2 permissions to match the guest's own
|
||||
* S2. For exec faults, we'd only reach this point if the guest
|
||||
* actually allowed it (see kvm_s2_handle_perm_fault).
|
||||
*
|
||||
* Also encode the level of the original translation in the SW bits
|
||||
* of the leaf entry as a proxy for the span of that translation.
|
||||
* This will be retrieved on TLB invalidation from the guest and
|
||||
* used to limit the invalidation scope if a TTL hint or a range
|
||||
* isn't provided.
|
||||
*/
|
||||
if (nested) {
|
||||
writable &= kvm_s2_trans_writable(nested);
|
||||
if (!kvm_s2_trans_readable(nested))
|
||||
prot &= ~KVM_PGTABLE_PROT_R;
|
||||
|
||||
prot |= kvm_encode_nested_level(nested);
|
||||
}
|
||||
|
||||
read_lock(&kvm->mmu_lock);
|
||||
pgt = vcpu->arch.hw_mmu->pgt;
|
||||
if (mmu_invalidate_retry(kvm, mmu_seq)) {
|
||||
@@ -1566,7 +1659,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||
prot |= KVM_PGTABLE_PROT_NORMAL_NC;
|
||||
else
|
||||
prot |= KVM_PGTABLE_PROT_DEVICE;
|
||||
} else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC)) {
|
||||
} else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC) &&
|
||||
(!nested || kvm_s2_trans_executable(nested))) {
|
||||
prot |= KVM_PGTABLE_PROT_X;
|
||||
}
|
||||
|
||||
@@ -1575,14 +1669,21 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||
* permissions only if vma_pagesize equals fault_granule. Otherwise,
|
||||
* kvm_pgtable_stage2_map() should be called to change block size.
|
||||
*/
|
||||
if (fault_is_perm && vma_pagesize == fault_granule)
|
||||
if (fault_is_perm && vma_pagesize == fault_granule) {
|
||||
/*
|
||||
* Drop the SW bits in favour of those stored in the
|
||||
* PTE, which will be preserved.
|
||||
*/
|
||||
prot &= ~KVM_NV_GUEST_MAP_SZ;
|
||||
ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
|
||||
else
|
||||
} else {
|
||||
ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
|
||||
__pfn_to_phys(pfn), prot,
|
||||
memcache,
|
||||
KVM_PGTABLE_WALK_HANDLE_FAULT |
|
||||
KVM_PGTABLE_WALK_SHARED);
|
||||
}
|
||||
|
||||
out_unlock:
|
||||
read_unlock(&kvm->mmu_lock);
|
||||
|
||||
@@ -1626,8 +1727,10 @@ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
|
||||
*/
|
||||
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_s2_trans nested_trans, *nested = NULL;
|
||||
unsigned long esr;
|
||||
phys_addr_t fault_ipa;
|
||||
phys_addr_t fault_ipa; /* The address we faulted on */
|
||||
phys_addr_t ipa; /* Always the IPA in the L1 guest phys space */
|
||||
struct kvm_memory_slot *memslot;
|
||||
unsigned long hva;
|
||||
bool is_iabt, write_fault, writable;
|
||||
@@ -1636,7 +1739,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
|
||||
|
||||
esr = kvm_vcpu_get_esr(vcpu);
|
||||
|
||||
fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
|
||||
ipa = fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
|
||||
is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
|
||||
|
||||
if (esr_fsc_is_translation_fault(esr)) {
|
||||
@@ -1686,7 +1789,42 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
|
||||
|
||||
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||
|
||||
gfn = fault_ipa >> PAGE_SHIFT;
|
||||
/*
|
||||
* We may have faulted on a shadow stage 2 page table if we are
|
||||
* running a nested guest. In this case, we have to resolve the L2
|
||||
* IPA to the L1 IPA first, before knowing what kind of memory should
|
||||
* back the L1 IPA.
|
||||
*
|
||||
* If the shadow stage 2 page table walk faults, then we simply inject
|
||||
* this to the guest and carry on.
|
||||
*
|
||||
* If there are no shadow S2 PTs because S2 is disabled, there is
|
||||
* nothing to walk and we treat it as a 1:1 before going through the
|
||||
* canonical translation.
|
||||
*/
|
||||
if (kvm_is_nested_s2_mmu(vcpu->kvm,vcpu->arch.hw_mmu) &&
|
||||
vcpu->arch.hw_mmu->nested_stage2_enabled) {
|
||||
u32 esr;
|
||||
|
||||
ret = kvm_walk_nested_s2(vcpu, fault_ipa, &nested_trans);
|
||||
if (ret) {
|
||||
esr = kvm_s2_trans_esr(&nested_trans);
|
||||
kvm_inject_s2_fault(vcpu, esr);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
ret = kvm_s2_handle_perm_fault(vcpu, &nested_trans);
|
||||
if (ret) {
|
||||
esr = kvm_s2_trans_esr(&nested_trans);
|
||||
kvm_inject_s2_fault(vcpu, esr);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
ipa = kvm_s2_trans_output(&nested_trans);
|
||||
nested = &nested_trans;
|
||||
}
|
||||
|
||||
gfn = ipa >> PAGE_SHIFT;
|
||||
memslot = gfn_to_memslot(vcpu->kvm, gfn);
|
||||
hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
|
||||
write_fault = kvm_is_write_fault(vcpu);
|
||||
@@ -1730,13 +1868,13 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
|
||||
* faulting VA. This is always 12 bits, irrespective
|
||||
* of the page size.
|
||||
*/
|
||||
fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
|
||||
ret = io_mem_abort(vcpu, fault_ipa);
|
||||
ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
|
||||
ret = io_mem_abort(vcpu, ipa);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
/* Userspace should not be able to register out-of-bounds IPAs */
|
||||
VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->arch.hw_mmu));
|
||||
VM_BUG_ON(ipa >= kvm_phys_size(vcpu->arch.hw_mmu));
|
||||
|
||||
if (esr_fsc_is_access_flag_fault(esr)) {
|
||||
handle_access_fault(vcpu, fault_ipa);
|
||||
@@ -1744,7 +1882,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
ret = user_mem_abort(vcpu, fault_ipa, memslot, hva,
|
||||
ret = user_mem_abort(vcpu, fault_ipa, nested, memslot, hva,
|
||||
esr_fsc_is_permission_fault(esr));
|
||||
if (ret == 0)
|
||||
ret = 1;
|
||||
@@ -1767,6 +1905,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
|
||||
(range->end - range->start) << PAGE_SHIFT,
|
||||
range->may_block);
|
||||
|
||||
kvm_nested_s2_unmap(kvm);
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -1780,6 +1919,10 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
|
||||
return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
|
||||
range->start << PAGE_SHIFT,
|
||||
size, true);
|
||||
/*
|
||||
* TODO: Handle nested_mmu structures here using the reverse mapping in
|
||||
* a later version of patch series.
|
||||
*/
|
||||
}
|
||||
|
||||
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
|
||||
@@ -2022,11 +2165,6 @@ void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
|
||||
{
|
||||
}
|
||||
|
||||
void kvm_arch_flush_shadow_all(struct kvm *kvm)
|
||||
{
|
||||
kvm_uninit_stage2_mmu(kvm);
|
||||
}
|
||||
|
||||
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
|
||||
struct kvm_memory_slot *slot)
|
||||
{
|
||||
@@ -2034,7 +2172,8 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
|
||||
phys_addr_t size = slot->npages << PAGE_SHIFT;
|
||||
|
||||
write_lock(&kvm->mmu_lock);
|
||||
unmap_stage2_range(&kvm->arch.mmu, gpa, size);
|
||||
kvm_stage2_unmap_range(&kvm->arch.mmu, gpa, size);
|
||||
kvm_nested_s2_unmap(kvm);
|
||||
write_unlock(&kvm->mmu_lock);
|
||||
}
|
||||
|
||||
|
File diff suppressed because it is too large
@@ -54,7 +54,7 @@ static u32 __kvm_pmu_event_mask(unsigned int pmuver)
|
||||
|
||||
static u32 kvm_pmu_event_mask(struct kvm *kvm)
|
||||
{
|
||||
u64 dfr0 = IDREG(kvm, SYS_ID_AA64DFR0_EL1);
|
||||
u64 dfr0 = kvm_read_vm_id_reg(kvm, SYS_ID_AA64DFR0_EL1);
|
||||
u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, dfr0);
|
||||
|
||||
return __kvm_pmu_event_mask(pmuver);
|
||||
|
@@ -268,6 +268,12 @@ void kvm_reset_vcpu(struct kvm_vcpu *vcpu)
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
u32 kvm_get_pa_bits(struct kvm *kvm)
|
||||
{
|
||||
/* Fixed limit until we can configure ID_AA64MMFR0.PARange */
|
||||
return kvm_ipa_limit;
|
||||
}
|
||||
|
||||
u32 get_kvm_ipa_limit(void)
|
||||
{
|
||||
return kvm_ipa_limit;
|
||||
|
@@ -121,6 +121,7 @@ static bool get_el2_to_el1_mapping(unsigned int reg,
|
||||
MAPPED_EL2_SYSREG(AMAIR_EL2, AMAIR_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(ELR_EL2, ELR_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(SPSR_EL2, SPSR_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(ZCR_EL2, ZCR_EL1, NULL );
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
@@ -383,6 +384,12 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
|
||||
bool was_enabled = vcpu_has_cache_enabled(vcpu);
|
||||
u64 val, mask, shift;
|
||||
|
||||
if (reg_to_encoding(r) == SYS_TCR2_EL1 &&
|
||||
!kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, TCRX, IMP)) {
|
||||
kvm_inject_undefined(vcpu);
|
||||
return false;
|
||||
}
|
||||
|
||||
BUG_ON(!p->is_write);
|
||||
|
||||
get_access_mask(r, &mask, &shift);
|
||||
@@ -1565,7 +1572,7 @@ static u64 kvm_read_sanitised_id_reg(struct kvm_vcpu *vcpu,
|
||||
|
||||
static u64 read_id_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
|
||||
{
|
||||
return IDREG(vcpu->kvm, reg_to_encoding(r));
|
||||
return kvm_read_vm_id_reg(vcpu->kvm, reg_to_encoding(r));
|
||||
}
|
||||
|
||||
static bool is_feature_id_reg(u32 encoding)
|
||||
@@ -1583,6 +1590,9 @@ static bool is_feature_id_reg(u32 encoding)
|
||||
*/
|
||||
static inline bool is_vm_ftr_id_reg(u32 id)
|
||||
{
|
||||
if (id == SYS_CTR_EL0)
|
||||
return true;
|
||||
|
||||
return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
|
||||
sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
|
||||
sys_reg_CRm(id) < 8);
|
||||
@@ -1851,7 +1861,7 @@ static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
|
||||
|
||||
ret = arm64_check_features(vcpu, rd, val);
|
||||
if (!ret)
|
||||
IDREG(vcpu->kvm, id) = val;
|
||||
kvm_set_vm_id_reg(vcpu->kvm, id, val);
|
||||
|
||||
mutex_unlock(&vcpu->kvm->arch.config_lock);
|
||||
|
||||
@@ -1867,6 +1877,18 @@ static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
|
||||
return ret;
|
||||
}
|
||||
|
||||
void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val)
|
||||
{
|
||||
u64 *p = __vm_id_reg(&kvm->arch, reg);
|
||||
|
||||
lockdep_assert_held(&kvm->arch.config_lock);
|
||||
|
||||
if (KVM_BUG_ON(kvm_vm_has_ran_once(kvm) || !p, kvm))
|
||||
return;
|
||||
|
||||
*p = val;
|
||||
}
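/*
 * Editor's illustrative sketch, not part of this commit: the userspace
 * side of writing a VM-wide feature ID register such as CTR_EL0, which
 * must happen before any vCPU has run. The field choice and value below
 * are hypothetical; only fields in the writable mask (DIC, IDC,
 * DminLine, IminLine) and values accepted by arm64_check_features()
 * will succeed. ARM64_SYS_REG() comes from the arm64 KVM uapi headers.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define CTR_EL0_IDX	ARM64_SYS_REG(3, 3, 0, 0, 1)	/* S3_3_C0_C0_1 */

static int set_guest_ctr_el0(int vcpu_fd, uint64_t ctr)
{
	struct kvm_one_reg reg = {
		.id = CTR_EL0_IDX,
		.addr = (uintptr_t)&ctr,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}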
|
||||
|
||||
static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
|
||||
u64 *val)
|
||||
{
|
||||
@@ -1886,7 +1908,7 @@ static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
||||
if (p->is_write)
|
||||
return write_to_read_only(vcpu, p, r);
|
||||
|
||||
p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
|
||||
p->regval = kvm_read_vm_id_reg(vcpu->kvm, SYS_CTR_EL0);
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -2199,6 +2221,40 @@ static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
|
||||
return __vcpu_sys_reg(vcpu, r->reg) = val;
|
||||
}
|
||||
|
||||
static unsigned int sve_el2_visibility(const struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd)
|
||||
{
|
||||
unsigned int r;
|
||||
|
||||
r = el2_visibility(vcpu, rd);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
return sve_visibility(vcpu, rd);
|
||||
}
|
||||
|
||||
static bool access_zcr_el2(struct kvm_vcpu *vcpu,
|
||||
struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
{
|
||||
unsigned int vq;
|
||||
|
||||
if (guest_hyp_sve_traps_enabled(vcpu)) {
|
||||
kvm_inject_nested_sve_trap(vcpu);
|
||||
return true;
|
||||
}
|
||||
|
||||
if (!p->is_write) {
|
||||
p->regval = vcpu_read_sys_reg(vcpu, ZCR_EL2);
|
||||
return true;
|
||||
}
|
||||
|
||||
vq = SYS_FIELD_GET(ZCR_ELx, LEN, p->regval) + 1;
|
||||
vq = min(vq, vcpu_sve_max_vq(vcpu));
|
||||
vcpu_write_sys_reg(vcpu, vq - 1, ZCR_EL2);
|
||||
return true;
|
||||
}
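/*
 * Editor's illustrative sketch, not part of this commit: the vector
 * length clamping above in numbers. ZCR_ELx.LEN encodes (vq - 1) in
 * 128-bit quadwords, so a nested hypervisor writing LEN = 7 asks for a
 * 1024-bit VL; if vcpu_sve_max_vq() is 4 (512 bits), the stored value
 * becomes LEN = 3.
 */
static u64 clamp_zcr_len(u64 requested_len, unsigned int max_vq)
{
	unsigned int vq = (unsigned int)requested_len + 1;

	vq = min(vq, max_vq);
	return vq - 1;
}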
|
||||
|
||||
/*
|
||||
* Architected system registers.
|
||||
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
|
||||
@@ -2471,11 +2527,14 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
||||
|
||||
{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
|
||||
{ SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1,
|
||||
.set_user = set_clidr },
|
||||
.set_user = set_clidr, .val = ~CLIDR_EL1_RES0 },
|
||||
{ SYS_DESC(SYS_CCSIDR2_EL1), undef_access },
|
||||
{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
|
||||
{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
|
||||
{ SYS_DESC(SYS_CTR_EL0), access_ctr },
|
||||
ID_WRITABLE(CTR_EL0, CTR_EL0_DIC_MASK |
|
||||
CTR_EL0_IDC_MASK |
|
||||
CTR_EL0_DminLine_MASK |
|
||||
CTR_EL0_IminLine_MASK),
|
||||
{ SYS_DESC(SYS_SVCR), undef_access },
|
||||
|
||||
{ PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr,
|
||||
@@ -2688,6 +2747,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
||||
EL2_REG_VNCR(HFGITR_EL2, reset_val, 0),
|
||||
EL2_REG_VNCR(HACR_EL2, reset_val, 0),
|
||||
|
||||
{ SYS_DESC(SYS_ZCR_EL2), .access = access_zcr_el2, .reset = reset_val,
|
||||
.visibility = sve_el2_visibility, .reg = ZCR_EL2 },
|
||||
|
||||
EL2_REG_VNCR(HCRX_EL2, reset_val, 0),
|
||||
|
||||
EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
|
||||
@@ -2741,6 +2803,264 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
||||
EL2_REG(SP_EL2, NULL, reset_unknown, 0),
|
||||
};
|
||||
|
||||
static bool kvm_supported_tlbi_s12_op(struct kvm_vcpu *vpcu, u32 instr)
|
||||
{
|
||||
struct kvm *kvm = vpcu->kvm;
|
||||
u8 CRm = sys_reg_CRm(instr);
|
||||
|
||||
if (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
|
||||
!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
|
||||
return false;
|
||||
|
||||
if (CRm == TLBI_CRm_nROS &&
|
||||
!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool handle_alle1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
{
|
||||
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
|
||||
|
||||
if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding)) {
|
||||
kvm_inject_undefined(vcpu);
|
||||
return false;
|
||||
}
|
||||
|
||||
write_lock(&vcpu->kvm->mmu_lock);
|
||||
|
||||
/*
|
||||
* Drop all shadow S2s, resulting in S1/S2 TLBIs for each of the
|
||||
* corresponding VMIDs.
|
||||
*/
|
||||
kvm_nested_s2_unmap(vcpu->kvm);
|
||||
|
||||
write_unlock(&vcpu->kvm->mmu_lock);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool kvm_supported_tlbi_ipas2_op(struct kvm_vcpu *vpcu, u32 instr)
|
||||
{
|
||||
struct kvm *kvm = vpcu->kvm;
|
||||
u8 CRm = sys_reg_CRm(instr);
|
||||
u8 Op2 = sys_reg_Op2(instr);
|
||||
|
||||
if (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
|
||||
!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
|
||||
return false;
|
||||
|
||||
if (CRm == TLBI_CRm_IPAIS && (Op2 == 2 || Op2 == 6) &&
|
||||
!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
|
||||
return false;
|
||||
|
||||
if (CRm == TLBI_CRm_IPAONS && (Op2 == 0 || Op2 == 4) &&
|
||||
!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
|
||||
return false;
|
||||
|
||||
if (CRm == TLBI_CRm_IPAONS && (Op2 == 3 || Op2 == 7) &&
|
||||
!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Only defined here as this is an internal "abstraction" */
|
||||
union tlbi_info {
|
||||
struct {
|
||||
u64 start;
|
||||
u64 size;
|
||||
} range;
|
||||
|
||||
struct {
|
||||
u64 addr;
|
||||
} ipa;
|
||||
|
||||
struct {
|
||||
u64 addr;
|
||||
u32 encoding;
|
||||
} va;
|
||||
};
|
||||
|
||||
static void s2_mmu_unmap_range(struct kvm_s2_mmu *mmu,
|
||||
const union tlbi_info *info)
|
||||
{
|
||||
kvm_stage2_unmap_range(mmu, info->range.start, info->range.size);
|
||||
}
|
||||
|
||||
static bool handle_vmalls12e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
{
|
||||
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
|
||||
u64 limit, vttbr;
|
||||
|
||||
if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding)) {
|
||||
kvm_inject_undefined(vcpu);
|
||||
return false;
|
||||
}
|
||||
|
||||
vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
|
||||
limit = BIT_ULL(kvm_get_pa_bits(vcpu->kvm));
|
||||
|
||||
kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
|
||||
&(union tlbi_info) {
|
||||
.range = {
|
||||
.start = 0,
|
||||
.size = limit,
|
||||
},
|
||||
},
|
||||
s2_mmu_unmap_range);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool handle_ripas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
{
|
||||
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
|
||||
u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
|
||||
u64 base, range, tg, num, scale;
|
||||
int shift;
|
||||
|
||||
if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding)) {
|
||||
kvm_inject_undefined(vcpu);
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Because the shadow S2 structure doesn't necessarily reflect that
|
||||
* of the guest's S2 (different base granule size, for example), we
|
||||
* decide to ignore TTL and only use the described range.
|
||||
*/
|
||||
tg = FIELD_GET(GENMASK(47, 46), p->regval);
|
||||
scale = FIELD_GET(GENMASK(45, 44), p->regval);
|
||||
num = FIELD_GET(GENMASK(43, 39), p->regval);
|
||||
base = p->regval & GENMASK(36, 0);
|
||||
|
||||
switch(tg) {
|
||||
case 1:
|
||||
shift = 12;
|
||||
break;
|
||||
case 2:
|
||||
shift = 14;
|
||||
break;
|
||||
case 3:
|
||||
default: /* IMPDEF: handle tg==0 as 64k */
|
||||
shift = 16;
|
||||
break;
|
||||
}
|
||||
|
||||
base <<= shift;
|
||||
range = __TLBI_RANGE_PAGES(num, scale) << shift;
|
||||
|
||||
kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
|
||||
&(union tlbi_info) {
|
||||
.range = {
|
||||
.start = base,
|
||||
.size = range,
|
||||
},
|
||||
},
|
||||
s2_mmu_unmap_range);
|
||||
|
||||
return true;
|
||||
}
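/*
 * Editor's illustrative sketch, not part of this commit: decoding the
 * range operand above by hand. Assuming the usual
 * __TLBI_RANGE_PAGES(num, scale) = (num + 1) << (5 * scale + 1)
 * definition, a register value with TG = 0b01 (4KiB granule, shift 12),
 * SCALE = 2 and NUM = 7 describes (7 + 1) << 11 = 16384 pages, i.e. a
 * 64MiB invalidation starting at BADDR << 12.
 */
static u64 ripas2_range_bytes(u64 regval)
{
	u64 scale = FIELD_GET(GENMASK(45, 44), regval);
	u64 num = FIELD_GET(GENMASK(43, 39), regval);
	int shift = 12;		/* assume TG = 0b01, a 4KiB granule */

	return __TLBI_RANGE_PAGES(num, scale) << shift;
}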
|
||||
|
||||
static void s2_mmu_unmap_ipa(struct kvm_s2_mmu *mmu,
|
||||
const union tlbi_info *info)
|
||||
{
|
||||
unsigned long max_size;
|
||||
u64 base_addr;
|
||||
|
||||
/*
|
||||
* We drop a number of things from the supplied value:
|
||||
*
|
||||
* - NS bit: we're non-secure only.
|
||||
*
|
||||
* - IPA[51:48]: We don't support 52bit IPA just yet...
|
||||
*
|
||||
* And of course, adjust the IPA to be on an actual address.
|
||||
*/
|
||||
base_addr = (info->ipa.addr & GENMASK_ULL(35, 0)) << 12;
|
||||
max_size = compute_tlb_inval_range(mmu, info->ipa.addr);
|
||||
base_addr &= ~(max_size - 1);
|
||||
|
||||
kvm_stage2_unmap_range(mmu, base_addr, max_size);
|
||||
}
|
||||
|
||||
static bool handle_ipas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
{
|
||||
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
|
||||
u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
|
||||
|
||||
if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding)) {
|
||||
kvm_inject_undefined(vcpu);
|
||||
return false;
|
||||
}
|
||||
|
||||
kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
|
||||
&(union tlbi_info) {
|
||||
.ipa = {
|
||||
.addr = p->regval,
|
||||
},
|
||||
},
|
||||
s2_mmu_unmap_ipa);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void s2_mmu_tlbi_s1e1(struct kvm_s2_mmu *mmu,
|
||||
const union tlbi_info *info)
|
||||
{
|
||||
WARN_ON(__kvm_tlbi_s1e2(mmu, info->va.addr, info->va.encoding));
|
||||
}
|
||||
|
||||
static bool handle_tlbi_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
{
|
||||
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
|
||||
u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
|
||||
|
||||
/*
|
||||
* If we're here, this is because we've trapped on a EL1 TLBI
|
||||
* instruction that affects the EL1 translation regime while
|
||||
* we're running in a context that doesn't allow us to let the
|
||||
* HW do its thing (aka vEL2):
|
||||
*
|
||||
* - HCR_EL2.E2H == 0 : a non-VHE guest
|
||||
* - HCR_EL2.{E2H,TGE} == { 1, 0 } : a VHE guest in guest mode
|
||||
*
|
||||
* We don't expect these helpers to ever be called when running
|
||||
* in a vEL1 context.
|
||||
*/
|
||||
|
||||
WARN_ON(!vcpu_is_el2(vcpu));
|
||||
|
||||
if (!kvm_supported_tlbi_s1e1_op(vcpu, sys_encoding)) {
|
||||
kvm_inject_undefined(vcpu);
|
||||
return false;
|
||||
}
|
||||
|
||||
kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
|
||||
&(union tlbi_info) {
|
||||
.va = {
|
||||
.addr = p->regval,
|
||||
.encoding = sys_encoding,
|
||||
},
|
||||
},
|
||||
s2_mmu_tlbi_s1e1);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
#define SYS_INSN(insn, access_fn) \
|
||||
{ \
|
||||
SYS_DESC(OP_##insn), \
|
||||
.access = (access_fn), \
|
||||
}
|
||||
|
||||
static struct sys_reg_desc sys_insn_descs[] = {
|
||||
{ SYS_DESC(SYS_DC_ISW), access_dcsw },
|
||||
{ SYS_DESC(SYS_DC_IGSW), access_dcgsw },
|
||||
@@ -2751,9 +3071,147 @@ static struct sys_reg_desc sys_insn_descs[] = {
|
||||
{ SYS_DESC(SYS_DC_CISW), access_dcsw },
|
||||
{ SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
|
||||
{ SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
|
||||
};
|
||||
|
||||
static const struct sys_reg_desc *first_idreg;
|
||||
SYS_INSN(TLBI_VMALLE1OS, handle_tlbi_el1),
|
||||
SYS_INSN(TLBI_VAE1OS, handle_tlbi_el1),
|
||||
SYS_INSN(TLBI_ASIDE1OS, handle_tlbi_el1),
|
||||
	SYS_INSN(TLBI_VAAE1OS, handle_tlbi_el1),
	SYS_INSN(TLBI_VALE1OS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAALE1OS, handle_tlbi_el1),

	SYS_INSN(TLBI_RVAE1IS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVAAE1IS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVALE1IS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVAALE1IS, handle_tlbi_el1),

	SYS_INSN(TLBI_VMALLE1IS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAE1IS, handle_tlbi_el1),
	SYS_INSN(TLBI_ASIDE1IS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAAE1IS, handle_tlbi_el1),
	SYS_INSN(TLBI_VALE1IS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAALE1IS, handle_tlbi_el1),

	SYS_INSN(TLBI_RVAE1OS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVAAE1OS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVALE1OS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVAALE1OS, handle_tlbi_el1),

	SYS_INSN(TLBI_RVAE1, handle_tlbi_el1),
	SYS_INSN(TLBI_RVAAE1, handle_tlbi_el1),
	SYS_INSN(TLBI_RVALE1, handle_tlbi_el1),
	SYS_INSN(TLBI_RVAALE1, handle_tlbi_el1),

	SYS_INSN(TLBI_VMALLE1, handle_tlbi_el1),
	SYS_INSN(TLBI_VAE1, handle_tlbi_el1),
	SYS_INSN(TLBI_ASIDE1, handle_tlbi_el1),
	SYS_INSN(TLBI_VAAE1, handle_tlbi_el1),
	SYS_INSN(TLBI_VALE1, handle_tlbi_el1),
	SYS_INSN(TLBI_VAALE1, handle_tlbi_el1),

	SYS_INSN(TLBI_VMALLE1OSNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAE1OSNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_ASIDE1OSNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAAE1OSNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_VALE1OSNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAALE1OSNXS, handle_tlbi_el1),

	SYS_INSN(TLBI_RVAE1ISNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVAAE1ISNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVALE1ISNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVAALE1ISNXS, handle_tlbi_el1),

	SYS_INSN(TLBI_VMALLE1ISNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAE1ISNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_ASIDE1ISNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAAE1ISNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_VALE1ISNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAALE1ISNXS, handle_tlbi_el1),

	SYS_INSN(TLBI_RVAE1OSNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVAAE1OSNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVALE1OSNXS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVAALE1OSNXS, handle_tlbi_el1),

	SYS_INSN(TLBI_RVAE1NXS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVAAE1NXS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVALE1NXS, handle_tlbi_el1),
	SYS_INSN(TLBI_RVAALE1NXS, handle_tlbi_el1),

	SYS_INSN(TLBI_VMALLE1NXS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAE1NXS, handle_tlbi_el1),
	SYS_INSN(TLBI_ASIDE1NXS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAAE1NXS, handle_tlbi_el1),
	SYS_INSN(TLBI_VALE1NXS, handle_tlbi_el1),
	SYS_INSN(TLBI_VAALE1NXS, handle_tlbi_el1),

	SYS_INSN(TLBI_IPAS2E1IS, handle_ipas2e1is),
	SYS_INSN(TLBI_RIPAS2E1IS, handle_ripas2e1is),
	SYS_INSN(TLBI_IPAS2LE1IS, handle_ipas2e1is),
	SYS_INSN(TLBI_RIPAS2LE1IS, handle_ripas2e1is),

	SYS_INSN(TLBI_ALLE2OS, trap_undef),
	SYS_INSN(TLBI_VAE2OS, trap_undef),
	SYS_INSN(TLBI_ALLE1OS, handle_alle1is),
	SYS_INSN(TLBI_VALE2OS, trap_undef),
	SYS_INSN(TLBI_VMALLS12E1OS, handle_vmalls12e1is),

	SYS_INSN(TLBI_RVAE2IS, trap_undef),
	SYS_INSN(TLBI_RVALE2IS, trap_undef),

	SYS_INSN(TLBI_ALLE1IS, handle_alle1is),
	SYS_INSN(TLBI_VMALLS12E1IS, handle_vmalls12e1is),
	SYS_INSN(TLBI_IPAS2E1OS, handle_ipas2e1is),
	SYS_INSN(TLBI_IPAS2E1, handle_ipas2e1is),
	SYS_INSN(TLBI_RIPAS2E1, handle_ripas2e1is),
	SYS_INSN(TLBI_RIPAS2E1OS, handle_ripas2e1is),
	SYS_INSN(TLBI_IPAS2LE1OS, handle_ipas2e1is),
	SYS_INSN(TLBI_IPAS2LE1, handle_ipas2e1is),
	SYS_INSN(TLBI_RIPAS2LE1, handle_ripas2e1is),
	SYS_INSN(TLBI_RIPAS2LE1OS, handle_ripas2e1is),
	SYS_INSN(TLBI_RVAE2OS, trap_undef),
	SYS_INSN(TLBI_RVALE2OS, trap_undef),
	SYS_INSN(TLBI_RVAE2, trap_undef),
	SYS_INSN(TLBI_RVALE2, trap_undef),
	SYS_INSN(TLBI_ALLE1, handle_alle1is),
	SYS_INSN(TLBI_VMALLS12E1, handle_vmalls12e1is),

	SYS_INSN(TLBI_IPAS2E1ISNXS, handle_ipas2e1is),
	SYS_INSN(TLBI_RIPAS2E1ISNXS, handle_ripas2e1is),
	SYS_INSN(TLBI_IPAS2LE1ISNXS, handle_ipas2e1is),
	SYS_INSN(TLBI_RIPAS2LE1ISNXS, handle_ripas2e1is),

	SYS_INSN(TLBI_ALLE2OSNXS, trap_undef),
	SYS_INSN(TLBI_VAE2OSNXS, trap_undef),
	SYS_INSN(TLBI_ALLE1OSNXS, handle_alle1is),
	SYS_INSN(TLBI_VALE2OSNXS, trap_undef),
	SYS_INSN(TLBI_VMALLS12E1OSNXS, handle_vmalls12e1is),

	SYS_INSN(TLBI_RVAE2ISNXS, trap_undef),
	SYS_INSN(TLBI_RVALE2ISNXS, trap_undef),
	SYS_INSN(TLBI_ALLE2ISNXS, trap_undef),
	SYS_INSN(TLBI_VAE2ISNXS, trap_undef),

	SYS_INSN(TLBI_ALLE1ISNXS, handle_alle1is),
	SYS_INSN(TLBI_VALE2ISNXS, trap_undef),
	SYS_INSN(TLBI_VMALLS12E1ISNXS, handle_vmalls12e1is),
	SYS_INSN(TLBI_IPAS2E1OSNXS, handle_ipas2e1is),
	SYS_INSN(TLBI_IPAS2E1NXS, handle_ipas2e1is),
	SYS_INSN(TLBI_RIPAS2E1NXS, handle_ripas2e1is),
	SYS_INSN(TLBI_RIPAS2E1OSNXS, handle_ripas2e1is),
	SYS_INSN(TLBI_IPAS2LE1OSNXS, handle_ipas2e1is),
	SYS_INSN(TLBI_IPAS2LE1NXS, handle_ipas2e1is),
	SYS_INSN(TLBI_RIPAS2LE1NXS, handle_ripas2e1is),
	SYS_INSN(TLBI_RIPAS2LE1OSNXS, handle_ripas2e1is),
	SYS_INSN(TLBI_RVAE2OSNXS, trap_undef),
	SYS_INSN(TLBI_RVALE2OSNXS, trap_undef),
	SYS_INSN(TLBI_RVAE2NXS, trap_undef),
	SYS_INSN(TLBI_RVALE2NXS, trap_undef),
	SYS_INSN(TLBI_ALLE2NXS, trap_undef),
	SYS_INSN(TLBI_VAE2NXS, trap_undef),
	SYS_INSN(TLBI_ALLE1NXS, handle_alle1is),
	SYS_INSN(TLBI_VALE2NXS, trap_undef),
	SYS_INSN(TLBI_VMALLS12E1NXS, handle_vmalls12e1is),
};

static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
@ -2762,7 +3220,7 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
		u64 dfr = kvm_read_vm_id_reg(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
		u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP);

		p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) |
@ -3440,6 +3898,25 @@ static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
	return false;
}

static const struct sys_reg_desc *idregs_debug_find(struct kvm *kvm, u8 pos)
{
	unsigned long i, idreg_idx = 0;

	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
		const struct sys_reg_desc *r = &sys_reg_descs[i];

		if (!is_vm_ftr_id_reg(reg_to_encoding(r)))
			continue;

		if (idreg_idx == pos)
			return r;

		idreg_idx++;
	}

	return NULL;
}

static void *idregs_debug_start(struct seq_file *s, loff_t *pos)
{
	struct kvm *kvm = s->private;
@ -3451,7 +3928,7 @@ static void *idregs_debug_start(struct seq_file *s, loff_t *pos)
	if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags) &&
	    *iter == (u8)~0) {
		*iter = *pos;
		if (*iter >= KVM_ARM_ID_REG_NUM)
		if (!idregs_debug_find(kvm, *iter))
			iter = NULL;
	} else {
		iter = ERR_PTR(-EBUSY);
@ -3468,7 +3945,7 @@ static void *idregs_debug_next(struct seq_file *s, void *v, loff_t *pos)

	(*pos)++;

	if ((kvm->arch.idreg_debugfs_iter + 1) < KVM_ARM_ID_REG_NUM) {
	if (idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter + 1)) {
		kvm->arch.idreg_debugfs_iter++;

		return &kvm->arch.idreg_debugfs_iter;
@ -3493,16 +3970,16 @@ static void idregs_debug_stop(struct seq_file *s, void *v)

static int idregs_debug_show(struct seq_file *s, void *v)
{
	struct kvm *kvm = s->private;
	const struct sys_reg_desc *desc;
	struct kvm *kvm = s->private;

	desc = first_idreg + kvm->arch.idreg_debugfs_iter;
	desc = idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter);

	if (!desc->name)
		return 0;

	seq_printf(s, "%20s:\t%016llx\n",
		   desc->name, IDREG(kvm, IDX_IDREG(kvm->arch.idreg_debugfs_iter)));
		   desc->name, kvm_read_vm_id_reg(kvm, reg_to_encoding(desc)));

	return 0;
}
@ -3532,8 +4009,7 @@ static void reset_vm_ftr_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc
	if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
		return;

	lockdep_assert_held(&kvm->arch.config_lock);
	IDREG(kvm, id) = reg->reset(vcpu, reg);
	kvm_set_vm_id_reg(kvm, id, reg->reset(vcpu, reg));
}

static void reset_vcpu_ftr_id_reg(struct kvm_vcpu *vcpu,
@ -3686,8 +4162,8 @@ id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
 */

#define FUNCTION_INVARIANT(reg) \
	static u64 get_##reg(struct kvm_vcpu *v, \
			     const struct sys_reg_desc *r) \
	static u64 reset_##reg(struct kvm_vcpu *v, \
			       const struct sys_reg_desc *r) \
	{ \
		((struct sys_reg_desc *)r)->val = read_sysreg(reg); \
		return ((struct sys_reg_desc *)r)->val; \
@ -3697,18 +4173,11 @@ FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(aidr_el1)

static u64 get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
{
	((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
	return ((struct sys_reg_desc *)r)->val;
}

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] __ro_after_init = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
	{ SYS_DESC(SYS_MIDR_EL1), NULL, reset_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, reset_revidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, reset_aidr_el1 },
};

static int get_invariant_sys_reg(u64 id, u64 __user *uaddr)
@ -4019,20 +4488,11 @@ int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *
		if (!is_feature_id_reg(encoding) || !reg->set_user)
			continue;

		/*
		 * For ID registers, we return the writable mask. Other feature
		 * registers return a full 64bit mask. That's not necessarily
		 * compliant with a given revision of the architecture, but the
		 * RES0/RES1 definitions allow us to do that.
		 */
		if (is_vm_ftr_id_reg(encoding)) {
			if (!reg->val ||
			    (is_aa32_id_reg(encoding) && !kvm_supports_32bit_el0()))
				continue;
			val = reg->val;
		} else {
			val = ~0UL;
		if (!reg->val ||
		    (is_aa32_id_reg(encoding) && !kvm_supports_32bit_el0())) {
			continue;
		}
		val = reg->val;

		if (put_user(val, (masks + KVM_ARM_FEATURE_ID_RANGE_INDEX(encoding))))
			return -EFAULT;
@ -4041,11 +4501,34 @@ int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *
	return 0;
}

void kvm_init_sysreg(struct kvm_vcpu *vcpu)
static void vcpu_set_hcr(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&kvm->arch.config_lock);
	if (has_vhe() || has_hvhe())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		vcpu->arch.hcr_el2 |= HCR_FWB;

	if (cpus_have_final_cap(ARM64_HAS_EVT) &&
	    !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE) &&
	    kvm_read_vm_id_reg(kvm, SYS_CTR_EL0) == read_sanitised_ftr_reg(SYS_CTR_EL0))
		vcpu->arch.hcr_el2 |= HCR_TID4;
	else
		vcpu->arch.hcr_el2 |= HCR_TID2;

	if (vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	if (kvm_has_mte(vcpu->kvm))
		vcpu->arch.hcr_el2 |= HCR_ATA;

	/*
	 * In the absence of FGT, we cannot independently trap TLBI
@ -4054,12 +4537,29 @@ void kvm_init_sysreg(struct kvm_vcpu *vcpu)
	 */
	if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		vcpu->arch.hcr_el2 |= HCR_TTLBOS;
}

void kvm_calculate_traps(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&kvm->arch.config_lock);
	vcpu_set_hcr(vcpu);

	if (cpus_have_final_cap(ARM64_HAS_HCX)) {
		vcpu->arch.hcrx_el2 = HCRX_GUEST_FLAGS;
		/*
		 * In general, all HCRX_EL2 bits are gated by a feature.
		 * The only reason we can set SMPME without checking any
		 * feature is that its effects are not directly observable
		 * from the guest.
		 */
		vcpu->arch.hcrx_el2 = HCRX_EL2_SMPME;

		if (kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
			vcpu->arch.hcrx_el2 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);

		if (kvm_has_feat(kvm, ID_AA64MMFR3_EL1, TCRX, IMP))
			vcpu->arch.hcrx_el2 |= HCRX_EL2_TCR2En;
	}

	if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))
@ -4115,7 +4615,6 @@ out:

int __init kvm_sys_reg_table_init(void)
{
	struct sys_reg_params params;
	bool valid = true;
	unsigned int i;
	int ret = 0;
@ -4136,12 +4635,6 @@ int __init kvm_sys_reg_table_init(void)
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/* Find the first idreg (SYS_ID_PFR0_EL1) in sys_reg_descs. */
	params = encoding_to_params(SYS_ID_PFR0_EL1);
	first_idreg = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
	if (!first_idreg)
		return -EINVAL;

	ret = populate_nv_trap_config();

	for (i = 0; !ret && i < ARRAY_SIZE(sys_reg_descs); i++)
@ -212,6 +212,9 @@ bool ffa_device_is_valid(struct ffa_device *ffa_dev) { return false; }

extern const struct bus_type ffa_bus_type;

/* The FF-A 1.0 partition structure lacks the uuid[4] */
#define FFA_1_0_PARTITON_INFO_SZ (8)

/* FFA transport related */
struct ffa_partition_info {
	u16 id;
@ -219,6 +219,7 @@ static void guest_code(void)
	GUEST_REG_SYNC(SYS_ID_AA64MMFR1_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64MMFR2_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64ZFR0_EL1);
	GUEST_REG_SYNC(SYS_CTR_EL0);

	GUEST_DONE();
}
@ -490,11 +491,25 @@ static void test_clidr(struct kvm_vcpu *vcpu)
	test_reg_vals[encoding_to_range_idx(SYS_CLIDR_EL1)] = clidr;
}

static void test_ctr(struct kvm_vcpu *vcpu)
{
	u64 ctr;

	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CTR_EL0), &ctr);
	ctr &= ~CTR_EL0_DIC_MASK;
	if (ctr & CTR_EL0_IminLine_MASK)
		ctr--;

	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CTR_EL0), ctr);
	test_reg_vals[encoding_to_range_idx(SYS_CTR_EL0)] = ctr;
}

static void test_vcpu_ftr_id_regs(struct kvm_vcpu *vcpu)
{
	u64 val;

	test_clidr(vcpu);
	test_ctr(vcpu);

	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &val);
	val++;
@ -524,7 +539,9 @@ static void test_reset_preserves_id_regs(struct kvm_vcpu *vcpu)
	for (int i = 0; i < ARRAY_SIZE(test_regs); i++)
		test_assert_id_reg_unchanged(vcpu, test_regs[i].reg);

	test_assert_id_reg_unchanged(vcpu, SYS_MPIDR_EL1);
	test_assert_id_reg_unchanged(vcpu, SYS_CLIDR_EL1);
	test_assert_id_reg_unchanged(vcpu, SYS_CTR_EL0);

	ksft_test_result_pass("%s\n", __func__);
}