From 501a67a25dd459a7d16b35ecdbe8a1ca5463e2bd Mon Sep 17 00:00:00 2001 From: Andrew Scull Date: Tue, 15 Sep 2020 11:46:25 +0100 Subject: [PATCH 01/19] KVM: arm64: Remove __activate_vm wrapper The __activate_vm wrapper serves no useful function and has a misleading name, as it simply calls __load_guest_stage2 and does not touch HCR_EL2.VM, so remove it. Also rename __deactivate_vm to __load_host_stage2 to match the naming pattern. Signed-off-by: Andrew Scull Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200915104643.2543892-2-ascull@google.com --- arch/arm64/kvm/hyp/include/hyp/switch.h | 5 ----- arch/arm64/kvm/hyp/nvhe/switch.c | 8 ++++---- arch/arm64/kvm/hyp/vhe/switch.c | 10 +++++----- 3 files changed, 9 insertions(+), 14 deletions(-) diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 5b6b8fa00f0a..0864f88bc840 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -126,11 +126,6 @@ static inline void ___deactivate_traps(struct kvm_vcpu *vcpu) } } -static inline void __activate_vm(struct kvm_s2_mmu *mmu) -{ - __load_guest_stage2(mmu); -} - static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar) { u64 par, tmp; diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 0970442d2dbc..3c9c065b3264 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -93,7 +93,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu) write_sysreg(CPTR_EL2_DEFAULT, cptr_el2); } -static void __deactivate_vm(struct kvm_vcpu *vcpu) +static void __load_host_stage2(void) { write_sysreg(0, vttbr_el2); } @@ -194,7 +194,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) __sysreg32_restore_state(vcpu); __sysreg_restore_state_nvhe(guest_ctxt); - __activate_vm(kern_hyp_va(vcpu->arch.hw_mmu)); + __load_guest_stage2(kern_hyp_va(vcpu->arch.hw_mmu)); __activate_traps(vcpu); __hyp_vgic_restore_state(vcpu); @@ -219,7 +219,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) __hyp_vgic_save_state(vcpu); __deactivate_traps(vcpu); - __deactivate_vm(vcpu); + __load_host_stage2(); __sysreg_restore_state_nvhe(host_ctxt); @@ -253,7 +253,7 @@ void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) if (read_sysreg(vttbr_el2)) { __timer_disable_traps(vcpu); __deactivate_traps(vcpu); - __deactivate_vm(vcpu); + __load_host_stage2(); __sysreg_restore_state_nvhe(host_ctxt); } diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index c1da4f86ccac..6636522a8529 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -120,12 +120,12 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) * HCR_EL2.TGE. * * We have already configured the guest's stage 1 translation in - * kvm_vcpu_load_sysregs_vhe above. We must now call __activate_vm - * before __activate_traps, because __activate_vm configures - * stage 2 translation, and __activate_traps clear HCR_EL2.TGE - * (among other things). + * kvm_vcpu_load_sysregs_vhe above. We must now call + * __load_guest_stage2 before __activate_traps, because + * __load_guest_stage2 configures stage 2 translation, and + * __activate_traps clears HCR_EL2.TGE (among other things).
*/ - __activate_vm(vcpu->arch.hw_mmu); + __load_guest_stage2(vcpu->arch.hw_mmu); __activate_traps(vcpu); sysreg_restore_guest_state_vhe(guest_ctxt); From 6a0259ed29bba83653a36fabcdf6b06aecd78596 Mon Sep 17 00:00:00 2001 From: Andrew Scull Date: Tue, 15 Sep 2020 11:46:26 +0100 Subject: [PATCH 02/19] KVM: arm64: Remove hyp_panic arguments hyp_panic is able to find all the context it needs from within itself so remove the argument. The __hyp_panic wrapper becomes redundant so is also removed. Signed-off-by: Andrew Scull Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200915104643.2543892-3-ascull@google.com --- arch/arm64/include/asm/kvm_hyp.h | 2 +- arch/arm64/kvm/hyp/hyp-entry.S | 7 +------ arch/arm64/kvm/hyp/include/hyp/switch.h | 4 +--- arch/arm64/kvm/hyp/nvhe/switch.c | 8 ++++++-- arch/arm64/kvm/hyp/vhe/switch.c | 10 ++++++---- 5 files changed, 15 insertions(+), 16 deletions(-) diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 46689e7db46c..3de99b323061 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -89,7 +89,7 @@ void deactivate_traps_vhe_put(void); u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt); -void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt); +void __noreturn hyp_panic(void); #ifdef __KVM_NVHE_HYPERVISOR__ void __noreturn __hyp_do_panic(unsigned long, ...); #endif diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 46b4dab933d0..9cb3fbca5d79 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -210,12 +210,7 @@ SYM_FUNC_START(__hyp_do_panic) SYM_FUNC_END(__hyp_do_panic) #endif -SYM_CODE_START(__hyp_panic) - get_host_ctxt x0, x1 - b hyp_panic -SYM_CODE_END(__hyp_panic) - -.macro invalid_vector label, target = __hyp_panic +.macro invalid_vector label, target = hyp_panic .align 2 SYM_CODE_START(\label) b \target diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 0864f88bc840..96ea3fdd0c20 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -510,13 +510,11 @@ static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu) static inline void __kvm_unexpected_el2_exception(void) { unsigned long addr, fixup; - struct kvm_cpu_context *host_ctxt; struct exception_table_entry *entry, *end; unsigned long elr_el2 = read_sysreg(elr_el2); entry = hyp_symbol_addr(__start___kvm_ex_table); end = hyp_symbol_addr(__stop___kvm_ex_table); - host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; while (entry < end) { addr = (unsigned long)&entry->insn + entry->insn; @@ -531,7 +529,7 @@ static inline void __kvm_unexpected_el2_exception(void) return; } - hyp_panic(host_ctxt); + hyp_panic(); } #endif /* __ARM64_KVM_HYP_SWITCH_H__ */ diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 3c9c065b3264..26d6fd4b6f4d 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -242,14 +242,18 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) return exit_code; } -void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) +void __noreturn hyp_panic(void) { u64 spsr = read_sysreg_el2(SYS_SPSR); u64 elr = read_sysreg_el2(SYS_ELR); u64 par = read_sysreg(par_el1); - struct kvm_vcpu *vcpu = host_ctxt->__hyp_running_vcpu; + struct kvm_cpu_context *host_ctxt; + struct kvm_vcpu *vcpu; unsigned long str_va; + host_ctxt = 
&__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; + vcpu = host_ctxt->__hyp_running_vcpu; + if (read_sysreg(vttbr_el2)) { __timer_disable_traps(vcpu); __deactivate_traps(vcpu); diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index 6636522a8529..c1fd47557713 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -192,10 +192,12 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) return ret; } -static void __hyp_call_panic(u64 spsr, u64 elr, u64 par, - struct kvm_cpu_context *host_ctxt) +static void __hyp_call_panic(u64 spsr, u64 elr, u64 par) { + struct kvm_cpu_context *host_ctxt; struct kvm_vcpu *vcpu; + + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; vcpu = host_ctxt->__hyp_running_vcpu; __deactivate_traps(vcpu); @@ -208,13 +210,13 @@ static void __hyp_call_panic(u64 spsr, u64 elr, u64 par, } NOKPROBE_SYMBOL(__hyp_call_panic); -void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) +void __noreturn hyp_panic(void) { u64 spsr = read_sysreg_el2(SYS_SPSR); u64 elr = read_sysreg_el2(SYS_ELR); u64 par = read_sysreg(par_el1); - __hyp_call_panic(spsr, elr, par, host_ctxt); + __hyp_call_panic(spsr, elr, par); unreachable(); } From d7ca1079d8ea897a8c45c9f78693972e28859056 Mon Sep 17 00:00:00 2001 From: Andrew Scull Date: Tue, 15 Sep 2020 11:46:27 +0100 Subject: [PATCH 03/19] KVM: arm64: Remove kvm_host_data_t typedef The kvm_host_data_t typedef is used inconsistently and goes against the kernel's coding style. Remove it in favour of the full struct specifier. Signed-off-by: Andrew Scull Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200915104643.2543892-4-ascull@google.com --- arch/arm64/include/asm/kvm_host.h | 4 +--- arch/arm64/kvm/arm.c | 4 ++-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index e52c927aade5..16adbefde1cc 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -262,8 +262,6 @@ struct kvm_host_data { struct kvm_pmu_events pmu_events; }; -typedef struct kvm_host_data kvm_host_data_t; - struct vcpu_reset_state { unsigned long pc; unsigned long r0; @@ -565,7 +563,7 @@ void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome); struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr); -DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data); +DECLARE_PER_CPU(struct kvm_host_data, kvm_host_data); static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt) { diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 46dc3d75cf13..1af4c77feda2 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -46,7 +46,7 @@ __asm__(".arch_extension virt"); #endif -DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data); +DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data); static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); /* The VMID used in the VTTBR */ @@ -1538,7 +1538,7 @@ static int init_hyp_mode(void) } for_each_possible_cpu(cpu) { - kvm_host_data_t *cpu_data; + struct kvm_host_data *cpu_data; cpu_data = per_cpu_ptr(&kvm_host_data, cpu); err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP); From ceee2fe4ba1f0b9dd388fd1030bea0162ca66a40 Mon Sep 17 00:00:00 2001 From: Andrew Scull Date: Tue, 15 Sep 2020 11:46:28 +0100 Subject: [PATCH 04/19] KVM: arm64: Choose hyp symbol based on context Make CHOOSE_HYP_SYM select the symbol of the active hypervisor for the host, the nVHE symbol for nVHE and the VHE symbol for VHE. 
The nVHE and VHE hypervisors see their own symbols without prefixes and trigger a link error when trying to use a symbol of the other hypervisor. Signed-off-by: Andrew Scull Signed-off-by: Marc Zyngier Cc: David Brazdil Link: https://lore.kernel.org/r/20200915104643.2543892-5-ascull@google.com --- arch/arm64/include/asm/kvm_asm.h | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 6f98fbd0ac81..a952859117b2 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -60,10 +60,24 @@ DECLARE_KVM_VHE_SYM(sym); \ DECLARE_KVM_NVHE_SYM(sym) -#define CHOOSE_VHE_SYM(sym) sym -#define CHOOSE_NVHE_SYM(sym) kvm_nvhe_sym(sym) +#if defined(__KVM_NVHE_HYPERVISOR__) + +#define CHOOSE_HYP_SYM(sym) CHOOSE_NVHE_SYM(sym) +#define CHOOSE_NVHE_SYM(sym) sym +/* The nVHE hypervisor shouldn't even try to access VHE symbols */ +extern void *__nvhe_undefined_symbol; +#define CHOOSE_VHE_SYM(sym) __nvhe_undefined_symbol + +#elif defined(__KVM_VHE_HYPERVISOR__) + +#define CHOOSE_HYP_SYM(sym) CHOOSE_VHE_SYM(sym) +#define CHOOSE_VHE_SYM(sym) sym +/* The VHE hypervisor shouldn't even try to access nVHE symbols */ +extern void *__vhe_undefined_symbol; +#define CHOOSE_NVHE_SYM(sym) __vhe_undefined_symbol + +#else -#ifndef __KVM_NVHE_HYPERVISOR__ /* * BIG FAT WARNINGS: * @@ -77,10 +91,9 @@ */ #define CHOOSE_HYP_SYM(sym) (is_kernel_in_hyp_mode() ? CHOOSE_VHE_SYM(sym) \ : CHOOSE_NVHE_SYM(sym)) -#else -/* The nVHE hypervisor shouldn't even try to access anything */ -extern void *__nvhe_undefined_symbol; -#define CHOOSE_HYP_SYM(sym) __nvhe_undefined_symbol +#define CHOOSE_VHE_SYM(sym) sym +#define CHOOSE_NVHE_SYM(sym) kvm_nvhe_sym(sym) + #endif /* Translate a kernel address @ptr into its equivalent linear mapping */ From a0e479523e3fb4fa52c351cc4906f38097de789a Mon Sep 17 00:00:00 2001 From: Andrew Scull Date: Tue, 15 Sep 2020 11:46:29 +0100 Subject: [PATCH 05/19] KVM: arm64: Save chosen hyp vector to a percpu variable Introduce a percpu variable to hold the address of the selected hyp vector that will be used with guests. This avoids the selection process each time a guest is being entered and can be used by nVHE when a separate vector is introduced for the host.
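For illustration, the pattern this enables is to pay for the vector selection once per CPU and then reuse the cached address on every guest entry. A minimal C sketch of the idea, using only symbols that appear in this patch (the VHE consumer writes the cached value to VBAR_EL1 in __activate_traps below):

	/* At CPU (re)init time: run the costly selection logic once. */
	__this_cpu_write(kvm_hyp_vector, (unsigned long)kvm_get_hyp_vector());

	/* At guest entry: a single sysreg write of the cached address. */
	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el1);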
Signed-off-by: Andrew Scull Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200915104643.2543892-6-ascull@google.com --- arch/arm64/include/asm/kvm_hyp.h | 2 ++ arch/arm64/kvm/arm.c | 5 ++++- arch/arm64/kvm/hyp/vhe/switch.c | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 3de99b323061..1e2491da324e 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -12,6 +12,8 @@ #include #include +DECLARE_PER_CPU(unsigned long, kvm_hyp_vector); + #define read_sysreg_elx(r,nvh,vh) \ ({ \ u64 reg; \ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 1af4c77feda2..77fc856ea513 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -47,6 +47,7 @@ __asm__(".arch_extension virt"); #endif DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data); +DEFINE_PER_CPU(unsigned long, kvm_hyp_vector); static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); /* The VMID used in the VTTBR */ @@ -1276,7 +1277,7 @@ static void cpu_init_hyp_mode(void) pgd_ptr = kvm_mmu_get_httbr(); hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE; - vector_ptr = (unsigned long)kvm_get_hyp_vector(); + vector_ptr = __this_cpu_read(kvm_hyp_vector); /* * Call initialization code, and switch to the full blown HYP code. @@ -1309,6 +1310,8 @@ static void cpu_hyp_reinit(void) cpu_hyp_reset(); + __this_cpu_write(kvm_hyp_vector, (unsigned long)kvm_get_hyp_vector()); + if (is_kernel_in_hyp_mode()) kvm_timer_init_vhe(); else diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index c1fd47557713..b49cf53c11f0 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -59,7 +59,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu) write_sysreg(val, cpacr_el1); - write_sysreg(kvm_get_hyp_vector(), vbar_el1); + write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el1); } NOKPROBE_SYMBOL(__activate_traps); From 6e3bfbb22c51bd1f121ca5c4bce6b51dc6cdceae Mon Sep 17 00:00:00 2001 From: Andrew Scull Date: Tue, 15 Sep 2020 11:46:30 +0100 Subject: [PATCH 06/19] KVM: arm64: nVHE: Use separate vector for the host The host is treated differently from the guests when an exception is taken so introduce a separate vector that is specialized for the host. This also allows the nVHE specific code to move out of hyp-entry.S and into nvhe/host.S. The host is only expected to make HVC calls and anything else is considered invalid and results in a panic. Hyp initialization is now passed the vector that is used for the host and it is swapped for the guest vector during the context switch. 
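In outline, the world switch then toggles VBAR_EL2 between the two vector tables. A sketch mirroring the __activate_traps()/__deactivate_traps() hunks below (nVHE reads the percpu variable through its hyp mapping):

	/* Entering a guest: EL2 exceptions use the guest vector... */
	write_sysreg(__hyp_this_cpu_read(kvm_hyp_vector), vbar_el2);

	/* ...and back in the host: the host-specialized vector. */
	write_sysreg(__kvm_hyp_host_vector, vbar_el2);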
Signed-off-by: Andrew Scull Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200915104643.2543892-7-ascull@google.com --- arch/arm64/include/asm/kvm_asm.h | 2 + arch/arm64/kernel/image-vars.h | 1 + arch/arm64/kvm/arm.c | 11 +++- arch/arm64/kvm/hyp/hyp-entry.S | 66 ------------------- arch/arm64/kvm/hyp/nvhe/Makefile | 2 +- arch/arm64/kvm/hyp/nvhe/host.S | 108 +++++++++++++++++++++++++++++++ arch/arm64/kvm/hyp/nvhe/switch.c | 3 + 7 files changed, 125 insertions(+), 68 deletions(-) create mode 100644 arch/arm64/kvm/hyp/nvhe/host.S diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index a952859117b2..fe51c06d480d 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -111,8 +111,10 @@ struct kvm_vcpu; struct kvm_s2_mmu; DECLARE_KVM_NVHE_SYM(__kvm_hyp_init); +DECLARE_KVM_NVHE_SYM(__kvm_hyp_host_vector); DECLARE_KVM_HYP_SYM(__kvm_hyp_vector); #define __kvm_hyp_init CHOOSE_NVHE_SYM(__kvm_hyp_init) +#define __kvm_hyp_host_vector CHOOSE_NVHE_SYM(__kvm_hyp_host_vector) #define __kvm_hyp_vector CHOOSE_HYP_SYM(__kvm_hyp_vector) #ifdef CONFIG_KVM_INDIRECT_VECTORS diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index 8982b68289b7..54bb0eb34b0f 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -71,6 +71,7 @@ KVM_NVHE_ALIAS(kvm_update_va_mask); /* Global kernel state accessed by nVHE hyp code. */ KVM_NVHE_ALIAS(arm64_ssbd_callback_required); KVM_NVHE_ALIAS(kvm_host_data); +KVM_NVHE_ALIAS(kvm_hyp_vector); KVM_NVHE_ALIAS(kvm_vgic_global_state); /* Kernel constant needed to compute idmap addresses. */ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 77fc856ea513..b6442c6be5ad 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1277,7 +1277,7 @@ static void cpu_init_hyp_mode(void) pgd_ptr = kvm_mmu_get_httbr(); hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE; - vector_ptr = __this_cpu_read(kvm_hyp_vector); + vector_ptr = (unsigned long)kern_hyp_va(kvm_ksym_ref(__kvm_hyp_host_vector)); /* * Call initialization code, and switch to the full blown HYP code. @@ -1542,6 +1542,7 @@ static int init_hyp_mode(void) for_each_possible_cpu(cpu) { struct kvm_host_data *cpu_data; + unsigned long *vector; cpu_data = per_cpu_ptr(&kvm_host_data, cpu); err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP); @@ -1550,6 +1551,14 @@ static int init_hyp_mode(void) kvm_err("Cannot map host CPU state: %d\n", err); goto out_err; } + + vector = per_cpu_ptr(&kvm_hyp_vector, cpu); + err = create_hyp_mappings(vector, vector + 1, PAGE_HYP); + + if (err) { + kvm_err("Cannot map hyp guest vector address\n"); + goto out_err; + } } err = hyp_map_aux_data(); diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 9cb3fbca5d79..f92489250dfc 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -12,7 +12,6 @@ #include #include #include -#include #include .macro save_caller_saved_regs_vect @@ -41,20 +40,6 @@ .text -.macro do_el2_call - /* - * Shuffle the parameters before calling the function - * pointed to in x0. Assumes parameters in x[1,2,3]. - */ - str lr, [sp, #-16]! 
- mov lr, x0 - mov x0, x1 - mov x1, x2 - mov x2, x3 - blr lr - ldr lr, [sp], #16 -.endm - el1_sync: // Guest trapped into EL2 mrs x0, esr_el2 @@ -63,44 +48,6 @@ el1_sync: // Guest trapped into EL2 ccmp x0, #ESR_ELx_EC_HVC32, #4, ne b.ne el1_trap -#ifdef __KVM_NVHE_HYPERVISOR__ - mrs x1, vttbr_el2 // If vttbr is valid, the guest - cbnz x1, el1_hvc_guest // called HVC - - /* Here, we're pretty sure the host called HVC. */ - ldp x0, x1, [sp], #16 - - /* Check for a stub HVC call */ - cmp x0, #HVC_STUB_HCALL_NR - b.hs 1f - - /* - * Compute the idmap address of __kvm_handle_stub_hvc and - * jump there. Since we use kimage_voffset, do not use the - * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead - * (by loading it from the constant pool). - * - * Preserve x0-x4, which may contain stub parameters. - */ - ldr x5, =__kvm_handle_stub_hvc - ldr_l x6, kimage_voffset - - /* x5 = __pa(x5) */ - sub x5, x5, x6 - br x5 - -1: - /* - * Perform the EL2 call - */ - kern_hyp_va x0 - do_el2_call - - eret - sb -#endif /* __KVM_NVHE_HYPERVISOR__ */ - -el1_hvc_guest: /* * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1. * The workaround has already been applied on the host, @@ -198,18 +145,6 @@ el2_error: eret sb -#ifdef __KVM_NVHE_HYPERVISOR__ -SYM_FUNC_START(__hyp_do_panic) - mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ - PSR_MODE_EL1h) - msr spsr_el2, lr - ldr lr, =panic - msr elr_el2, lr - eret - sb -SYM_FUNC_END(__hyp_do_panic) -#endif - .macro invalid_vector label, target = hyp_panic .align 2 SYM_CODE_START(\label) @@ -222,7 +157,6 @@ SYM_CODE_END(\label) invalid_vector el2t_irq_invalid invalid_vector el2t_fiq_invalid invalid_vector el2t_error_invalid - invalid_vector el2h_sync_invalid invalid_vector el2h_irq_invalid invalid_vector el2h_fiq_invalid invalid_vector el1_fiq_invalid diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile index aef76487edc2..ddf98eb07b9d 100644 --- a/arch/arm64/kvm/hyp/nvhe/Makefile +++ b/arch/arm64/kvm/hyp/nvhe/Makefile @@ -6,7 +6,7 @@ asflags-y := -D__KVM_NVHE_HYPERVISOR__ ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o +obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \ ../fpsimd.o ../hyp-entry.o diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S new file mode 100644 index 000000000000..128af58d342d --- /dev/null +++ b/arch/arm64/kvm/hyp/nvhe/host.S @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 - Google Inc + * Author: Andrew Scull + */ + +#include + +#include +#include +#include + + .text + +SYM_FUNC_START(__hyp_do_panic) + mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ + PSR_MODE_EL1h) + msr spsr_el2, lr + ldr lr, =panic + msr elr_el2, lr + eret + sb +SYM_FUNC_END(__hyp_do_panic) + +.macro host_el1_sync_vect + .align 7 +.L__vect_start\@: + esb + stp x0, x1, [sp, #-16]! + mrs x0, esr_el2 + lsr x0, x0, #ESR_ELx_EC_SHIFT + cmp x0, #ESR_ELx_EC_HVC64 + ldp x0, x1, [sp], #16 + b.ne hyp_panic + + /* Check for a stub HVC call */ + cmp x0, #HVC_STUB_HCALL_NR + b.hs 1f + + /* + * Compute the idmap address of __kvm_handle_stub_hvc and + * jump there. Since we use kimage_voffset, do not use the + * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead + * (by loading it from the constant pool). + * + * Preserve x0-x4, which may contain stub parameters. 
+ */ + ldr x5, =__kvm_handle_stub_hvc + ldr_l x6, kimage_voffset + + /* x5 = __pa(x5) */ + sub x5, x5, x6 + br x5 + +1: + /* + * Shuffle the parameters before calling the function + * pointed to in x0. Assumes parameters in x[1,2,3]. + */ + kern_hyp_va x0 + str lr, [sp, #-16]! + mov lr, x0 + mov x0, x1 + mov x1, x2 + mov x2, x3 + blr lr + ldr lr, [sp], #16 + + eret + sb +.L__vect_end\@: +.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80) + .error "host_el1_sync_vect larger than vector entry" +.endif +.endm + +.macro invalid_host_vect + .align 7 + b hyp_panic +.endm + +/* + * CONFIG_KVM_INDIRECT_VECTORS is not applied to the host vectors because the + * host knows about the EL2 vectors already, and there is no point in hiding + * them. + */ + .align 11 +SYM_CODE_START(__kvm_hyp_host_vector) + invalid_host_vect // Synchronous EL2t + invalid_host_vect // IRQ EL2t + invalid_host_vect // FIQ EL2t + invalid_host_vect // Error EL2t + + invalid_host_vect // Synchronous EL2h + invalid_host_vect // IRQ EL2h + invalid_host_vect // FIQ EL2h + invalid_host_vect // Error EL2h + + host_el1_sync_vect // Synchronous 64-bit EL1 + invalid_host_vect // IRQ 64-bit EL1 + invalid_host_vect // FIQ 64-bit EL1 + invalid_host_vect // Error 64-bit EL1 + + invalid_host_vect // Synchronous 32-bit EL1 + invalid_host_vect // IRQ 32-bit EL1 + invalid_host_vect // FIQ 32-bit EL1 + invalid_host_vect // Error 32-bit EL1 +SYM_CODE_END(__kvm_hyp_host_vector) diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 26d6fd4b6f4d..075384084e08 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -42,6 +42,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu) } write_sysreg(val, cptr_el2); + write_sysreg(__hyp_this_cpu_read(kvm_hyp_vector), vbar_el2); if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) { struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt; @@ -60,6 +61,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu) static void __deactivate_traps(struct kvm_vcpu *vcpu) { + extern char __kvm_hyp_host_vector[]; u64 mdcr_el2; ___deactivate_traps(vcpu); @@ -91,6 +93,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu) write_sysreg(mdcr_el2, mdcr_el2); write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2); write_sysreg(CPTR_EL2_DEFAULT, cptr_el2); + write_sysreg(__kvm_hyp_host_vector, vbar_el2); } static void __load_host_stage2(void) From 472fc011ccd30f05e0b39d71064777d39dd11cac Mon Sep 17 00:00:00 2001 From: Andrew Scull Date: Tue, 15 Sep 2020 11:46:31 +0100 Subject: [PATCH 07/19] KVM: arm64: nVHE: Don't consume host SErrors with ESB The ESB at the start of the host vector may cause SErrors to be consumed to DISR_EL1. However, this is not checked for the host so the SError could go unhandled. Remove the ESB so that SErrors are not consumed but are instead left pending for the host to consume. __guest_enter already defers entry into a guest if there are any SErrors pending. Signed-off-by: Andrew Scull Signed-off-by: Marc Zyngier Cc: James Morse Link: https://lore.kernel.org/r/20200915104643.2543892-8-ascull@google.com --- arch/arm64/kvm/hyp/nvhe/host.S | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S index 128af58d342d..da21fddcef75 100644 --- a/arch/arm64/kvm/hyp/nvhe/host.S +++ b/arch/arm64/kvm/hyp/nvhe/host.S @@ -25,7 +25,6 @@ SYM_FUNC_END(__hyp_do_panic) .macro host_el1_sync_vect .align 7 .L__vect_start\@: - esb stp x0, x1, [sp, #-16]! 
mrs x0, esr_el2 lsr x0, x0, #ESR_ELx_EC_SHIFT @@ -80,6 +79,11 @@ SYM_FUNC_END(__hyp_do_panic) .endm /* + * The host vector does not use an ESB instruction in order to avoid consuming + * SErrors that should only be consumed by the host. Guest entry is deferred by + * __guest_enter if there are any pending asynchronous exceptions so hyp will + * always return to the host without having consumed host SErrors. + * * CONFIG_KVM_INDIRECT_VECTORS is not applied to the host vectors because the * host knows about the EL2 vectors already, and there is no point in hiding * them. From b619d9aa8b385bdab747df596961d8fdddde4d52 Mon Sep 17 00:00:00 2001 From: Andrew Scull Date: Tue, 15 Sep 2020 11:46:32 +0100 Subject: [PATCH 08/19] KVM: arm64: Introduce hyp context During __guest_enter, save and restore from a new hyp context rather than the host context. This is preparation for separation of the hyp and host context in nVHE. Signed-off-by: Andrew Scull Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200915104643.2543892-9-ascull@google.com --- arch/arm64/include/asm/kvm_hyp.h | 3 ++- arch/arm64/kernel/image-vars.h | 1 + arch/arm64/kvm/arm.c | 10 ++++++++++ arch/arm64/kvm/hyp/entry.S | 10 +++++----- arch/arm64/kvm/hyp/include/hyp/switch.h | 2 +- arch/arm64/kvm/hyp/nvhe/switch.c | 2 +- arch/arm64/kvm/hyp/vhe/switch.c | 2 +- 7 files changed, 21 insertions(+), 9 deletions(-) diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 1e2491da324e..0b525e05e5bf 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -12,6 +12,7 @@ #include #include +DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt); DECLARE_PER_CPU(unsigned long, kvm_hyp_vector); #define read_sysreg_elx(r,nvh,vh) \ ({ \ u64 reg; \ @@ -89,7 +90,7 @@ void activate_traps_vhe_load(struct kvm_vcpu *vcpu); void deactivate_traps_vhe_put(void); #endif -u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt); +u64 __guest_enter(struct kvm_vcpu *vcpu); void __noreturn hyp_panic(void); #ifdef __KVM_NVHE_HYPERVISOR__ diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index 54bb0eb34b0f..9f419e4fc66b 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -71,6 +71,7 @@ KVM_NVHE_ALIAS(kvm_update_va_mask); /* Global kernel state accessed by nVHE hyp code.
*/ KVM_NVHE_ALIAS(arm64_ssbd_callback_required); KVM_NVHE_ALIAS(kvm_host_data); +KVM_NVHE_ALIAS(kvm_hyp_ctxt); KVM_NVHE_ALIAS(kvm_hyp_vector); KVM_NVHE_ALIAS(kvm_vgic_global_state); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index b6442c6be5ad..ae4b34f91e94 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -47,6 +47,7 @@ __asm__(".arch_extension virt"); #endif DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data); +DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt); DEFINE_PER_CPU(unsigned long, kvm_hyp_vector); static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); @@ -1542,6 +1543,7 @@ static int init_hyp_mode(void) for_each_possible_cpu(cpu) { struct kvm_host_data *cpu_data; + struct kvm_cpu_context *hyp_ctxt; unsigned long *vector; cpu_data = per_cpu_ptr(&kvm_host_data, cpu); @@ -1552,6 +1554,14 @@ static int init_hyp_mode(void) goto out_err; } + hyp_ctxt = per_cpu_ptr(&kvm_hyp_ctxt, cpu); + err = create_hyp_mappings(hyp_ctxt, hyp_ctxt + 1, PAGE_HYP); + + if (err) { + kvm_err("Cannot map hyp context: %d\n", err); + goto out_err; + } + vector = per_cpu_ptr(&kvm_hyp_vector, cpu); err = create_hyp_mappings(vector, vector + 1, PAGE_HYP); diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index 76e7eaf4675e..9551d7f186da 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -57,15 +57,15 @@ .endm /* - * u64 __guest_enter(struct kvm_vcpu *vcpu, - * struct kvm_cpu_context *host_ctxt); + * u64 __guest_enter(struct kvm_vcpu *vcpu); */ SYM_FUNC_START(__guest_enter) // x0: vcpu - // x1: host context - // x2-x17: clobbered by macros + // x1-x17: clobbered by macros // x29: guest context + hyp_adr_this_cpu x1, kvm_hyp_ctxt, x2 + // Store the host regs save_callee_saved_regs x1 @@ -148,7 +148,7 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL) // Store the guest's sp_el0 save_sp_el0 x1, x2 - get_host_ctxt x2, x3 + hyp_adr_this_cpu x2, kvm_hyp_ctxt, x3 // Macro ptrauth_switch_to_guest format: // ptrauth_switch_to_host(guest cxt, host cxt, tmp1, tmp2, tmp3) diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 96ea3fdd0c20..afe714056b97 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -381,7 +381,7 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu) !esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu))) return false; - ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; + ctxt = __hyp_this_cpu_ptr(kvm_hyp_ctxt); __ptrauth_save_key(ctxt, APIA); __ptrauth_save_key(ctxt, APIB); __ptrauth_save_key(ctxt, APDA); diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 075384084e08..c99945cda779 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -209,7 +209,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) do { /* Jump in the fire! */ - exit_code = __guest_enter(vcpu, host_ctxt); + exit_code = __guest_enter(vcpu); /* And we're baaack! */ } while (fixup_guest_exit(vcpu, &exit_code)); diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index b49cf53c11f0..cf477f856e51 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -135,7 +135,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) do { /* Jump in the fire! */ - exit_code = __guest_enter(vcpu, host_ctxt); + exit_code = __guest_enter(vcpu); /* And we're baaack! 
*/ } while (fixup_guest_exit(vcpu, &exit_code)); From 7c2e76d87f9ce7af47a07ca803343fd5f4aa4ab5 Mon Sep 17 00:00:00 2001 From: Andrew Scull Date: Tue, 15 Sep 2020 11:46:33 +0100 Subject: [PATCH 09/19] KVM: arm64: Update context references from host to hyp Hyp now has its own nominal context for saving and restoring its state when switching to and from a guest. Update the related comments and utilities to match the new name. Signed-off-by: Andrew Scull Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200915104643.2543892-10-ascull@google.com --- arch/arm64/include/asm/kvm_ptrauth.h | 6 +++--- arch/arm64/kvm/hyp/entry.S | 22 +++++++++++----------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/arch/arm64/include/asm/kvm_ptrauth.h b/arch/arm64/include/asm/kvm_ptrauth.h index 0ddf98c3ba9f..0cd0965255d2 100644 --- a/arch/arm64/include/asm/kvm_ptrauth.h +++ b/arch/arm64/include/asm/kvm_ptrauth.h @@ -60,7 +60,7 @@ .endm /* - * Both ptrauth_switch_to_guest and ptrauth_switch_to_host macros will + * Both ptrauth_switch_to_guest and ptrauth_switch_to_hyp macros will * check for the presence ARM64_HAS_ADDRESS_AUTH, which is defined as * (ARM64_HAS_ADDRESS_AUTH_ARCH || ARM64_HAS_ADDRESS_AUTH_IMP_DEF) and * then proceed ahead with the save/restore of Pointer Authentication @@ -78,7 +78,7 @@ alternative_else_nop_endif .L__skip_switch\@: .endm -.macro ptrauth_switch_to_host g_ctxt, h_ctxt, reg1, reg2, reg3 +.macro ptrauth_switch_to_hyp g_ctxt, h_ctxt, reg1, reg2, reg3 alternative_if_not ARM64_HAS_ADDRESS_AUTH b .L__skip_switch\@ alternative_else_nop_endif @@ -96,7 +96,7 @@ alternative_else_nop_endif #else /* !CONFIG_ARM64_PTR_AUTH */ .macro ptrauth_switch_to_guest g_ctxt, reg1, reg2, reg3 .endm -.macro ptrauth_switch_to_host g_ctxt, h_ctxt, reg1, reg2, reg3 +.macro ptrauth_switch_to_hyp g_ctxt, h_ctxt, reg1, reg2, reg3 .endm #endif /* CONFIG_ARM64_PTR_AUTH */ #endif /* __ASSEMBLY__ */ diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index 9551d7f186da..38cca690a6ff 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -66,16 +66,16 @@ SYM_FUNC_START(__guest_enter) hyp_adr_this_cpu x1, kvm_hyp_ctxt, x2 - // Store the host regs + // Store the hyp regs save_callee_saved_regs x1 - // Save the host's sp_el0 + // Save hyp's sp_el0 save_sp_el0 x1, x2 - // Now the host state is stored if we have a pending RAS SError it must - // affect the host. If any asynchronous exception is pending we defer - // the guest entry. The DSB isn't necessary before v8.2 as any SError - // would be fatal. + // Now the hyp state is stored if we have a pending RAS SError it must + // affect the host or hyp. If any asynchronous exception is pending we + // defer the guest entry. The DSB isn't necessary before v8.2 as any + // SError would be fatal. alternative_if ARM64_HAS_RAS_EXTN dsb nshst isb @@ -150,17 +150,17 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL) hyp_adr_this_cpu x2, kvm_hyp_ctxt, x3 - // Macro ptrauth_switch_to_guest format: - // ptrauth_switch_to_host(guest cxt, host cxt, tmp1, tmp2, tmp3) + // Macro ptrauth_switch_to_hyp format: + // ptrauth_switch_to_hyp(guest cxt, host cxt, tmp1, tmp2, tmp3) // The below macro to save/restore keys is not implemented in C code // as it may cause Pointer Authentication key signing mismatch errors // when this feature is enabled for kernel code. 
- ptrauth_switch_to_host x1, x2, x3, x4, x5 + ptrauth_switch_to_hyp x1, x2, x3, x4, x5 - // Restore the hosts's sp_el0 + // Restore hyp's sp_el0 restore_sp_el0 x2, x3 - // Now restore the host regs + // Now restore the hyp regs restore_callee_saved_regs x2 alternative_if ARM64_HAS_RAS_EXTN From 7db21530479f071ee0e0a4d5fcf5e6bc6c0352ba Mon Sep 17 00:00:00 2001 From: Andrew Scull Date: Tue, 15 Sep 2020 11:46:34 +0100 Subject: [PATCH 10/19] KVM: arm64: Restore hyp when panicking in guest context If the guest context is loaded when a panic is triggered, restore the hyp context so e.g. the shadow call stack works when hyp_panic() is called and SP_EL0 is valid when the host's panic() is called. Use the hyp context's __hyp_running_vcpu field to track when hyp transitions to and from the guest vcpu so the exception handlers know whether the context needs to be restored. Signed-off-by: Andrew Scull Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200915104643.2543892-11-ascull@google.com --- arch/arm64/include/asm/kvm_asm.h | 10 ++++++++++ arch/arm64/kvm/hyp/entry.S | 24 ++++++++++++++++++++++++ arch/arm64/kvm/hyp/hyp-entry.S | 5 ++--- arch/arm64/kvm/hyp/include/hyp/switch.h | 4 +++- arch/arm64/kvm/hyp/nvhe/host.S | 5 +++++ 5 files changed, 44 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index fe51c06d480d..4df2bd8882bc 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -236,6 +236,16 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ]; ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU] .endm +.macro get_loaded_vcpu vcpu, ctxt + hyp_adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu + ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU] +.endm + +.macro set_loaded_vcpu vcpu, ctxt, tmp + hyp_adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp + str \vcpu, [\ctxt, #HOST_CONTEXT_VCPU] +.endm + /* * KVM extable for unexpected exceptions. * In the same format _asm_extable, but output to a different section so that diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index 38cca690a6ff..4787fc82790c 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -86,6 +86,8 @@ alternative_else_nop_endif ret 1: + set_loaded_vcpu x0, x1, x2 + add x29, x0, #VCPU_CONTEXT // Macro ptrauth_switch_to_guest format: @@ -116,6 +118,26 @@ alternative_else_nop_endif eret sb +SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL) + // x2-x29,lr: vcpu regs + // vcpu x0-x1 on the stack + + // If the hyp context is loaded, go straight to hyp_panic + get_loaded_vcpu x0, x1 + cbz x0, hyp_panic + + // The hyp context is saved so make sure it is restored to allow + // hyp_panic to run at hyp and, subsequently, panic to run in the host. + // This makes use of __guest_exit to avoid duplication but sets the + // return address to tail call into hyp_panic. As a side effect, the + // current state is saved to the guest context but it will only be + // accurate if the guest had been completely restored. + hyp_adr_this_cpu x0, kvm_hyp_ctxt, x1 + adr x1, hyp_panic + str x1, [x0, #CPU_XREG_OFFSET(30)] + + get_vcpu_ptr x1, x0 + SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL) // x0: return code // x1: vcpu @@ -163,6 +185,8 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL) // Now restore the hyp regs restore_callee_saved_regs x2 + set_loaded_vcpu xzr, x1, x2 + alternative_if ARM64_HAS_RAS_EXTN // If we have the RAS extensions we can consume a pending error // without an unmask-SError and isb. 
The ESB-instruction consumed any diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index f92489250dfc..bc9f53df46f5 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -145,7 +145,7 @@ el2_error: eret sb -.macro invalid_vector label, target = hyp_panic +.macro invalid_vector label, target = __guest_exit_panic .align 2 SYM_CODE_START(\label) b \target @@ -186,10 +186,9 @@ check_preamble_length 661b, 662b .macro invalid_vect target .align 7 661: - b \target nop + stp x0, x1, [sp, #-16]! 662: - ldp x0, x1, [sp], #16 b \target check_preamble_length 661b, 662b diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index afe714056b97..821721b78ad9 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -509,6 +509,7 @@ static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu) static inline void __kvm_unexpected_el2_exception(void) { + extern char __guest_exit_panic[]; unsigned long addr, fixup; struct exception_table_entry *entry, *end; unsigned long elr_el2 = read_sysreg(elr_el2); @@ -529,7 +530,8 @@ static inline void __kvm_unexpected_el2_exception(void) return; } - hyp_panic(); + /* Trigger a panic after restoring the hyp context. */ + write_sysreg(__guest_exit_panic, elr_el2); } #endif /* __ARM64_KVM_HYP_SWITCH_H__ */ diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S index da21fddcef75..9ab7814e6114 100644 --- a/arch/arm64/kvm/hyp/nvhe/host.S +++ b/arch/arm64/kvm/hyp/nvhe/host.S @@ -75,6 +75,11 @@ SYM_FUNC_END(__hyp_do_panic) .macro invalid_host_vect .align 7 + /* If a guest is loaded, panic out of it. */ + stp x0, x1, [sp, #-16]! + get_loaded_vcpu x0, x1 + cbnz x0, __guest_exit_panic + add sp, sp, #16 b hyp_panic .endm From 603d2bdaa57e48a33d57eeb88971b88fbe5875d6 Mon Sep 17 00:00:00 2001 From: Andrew Scull Date: Tue, 15 Sep 2020 11:46:35 +0100 Subject: [PATCH 11/19] KVM: arm64: Share context save and restore macros To avoid duplicating the context save and restore macros, move them into a shareable header. Signed-off-by: Andrew Scull Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200915104643.2543892-12-ascull@google.com --- arch/arm64/include/asm/kvm_asm.h | 39 ++++++++++++++++++++++++++++++++ arch/arm64/kvm/hyp/entry.S | 39 -------------------------------- 2 files changed, 39 insertions(+), 39 deletions(-) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 4df2bd8882bc..4e3073858346 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -261,6 +261,45 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ]; .popsection .endm +#define CPU_XREG_OFFSET(x) (CPU_USER_PT_REGS + 8*x) +#define CPU_LR_OFFSET CPU_XREG_OFFSET(30) +#define CPU_SP_EL0_OFFSET (CPU_LR_OFFSET + 8) + +/* + * We treat x18 as callee-saved as the host may use it as a platform + * register (e.g. for shadow call stack). 
+ */ +.macro save_callee_saved_regs ctxt + str x18, [\ctxt, #CPU_XREG_OFFSET(18)] + stp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)] + stp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)] + stp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)] + stp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)] + stp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)] + stp x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)] +.endm + +.macro restore_callee_saved_regs ctxt + // We require \ctxt is not x18-x28 + ldr x18, [\ctxt, #CPU_XREG_OFFSET(18)] + ldp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)] + ldp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)] + ldp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)] + ldp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)] + ldp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)] + ldp x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)] +.endm + +.macro save_sp_el0 ctxt, tmp + mrs \tmp, sp_el0 + str \tmp, [\ctxt, #CPU_SP_EL0_OFFSET] +.endm + +.macro restore_sp_el0 ctxt, tmp + ldr \tmp, [\ctxt, #CPU_SP_EL0_OFFSET] + msr sp_el0, \tmp +.endm + #endif #endif /* __ARM_KVM_ASM_H__ */ diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index 4787fc82790c..afaa8d1f2485 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -7,7 +7,6 @@ #include #include -#include #include #include #include @@ -16,46 +15,8 @@ #include #include -#define CPU_XREG_OFFSET(x) (CPU_USER_PT_REGS + 8*x) -#define CPU_SP_EL0_OFFSET (CPU_XREG_OFFSET(30) + 8) - .text -/* - * We treat x18 as callee-saved as the host may use it as a platform - * register (e.g. for shadow call stack). - */ -.macro save_callee_saved_regs ctxt - str x18, [\ctxt, #CPU_XREG_OFFSET(18)] - stp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)] - stp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)] - stp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)] - stp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)] - stp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)] - stp x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)] -.endm - -.macro restore_callee_saved_regs ctxt - // We require \ctxt is not x18-x28 - ldr x18, [\ctxt, #CPU_XREG_OFFSET(18)] - ldp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)] - ldp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)] - ldp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)] - ldp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)] - ldp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)] - ldp x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)] -.endm - -.macro save_sp_el0 ctxt, tmp - mrs \tmp, sp_el0 - str \tmp, [\ctxt, #CPU_SP_EL0_OFFSET] -.endm - -.macro restore_sp_el0 ctxt, tmp - ldr \tmp, [\ctxt, #CPU_SP_EL0_OFFSET] - msr sp_el0, \tmp -.endm - /* * u64 __guest_enter(struct kvm_vcpu *vcpu); */ From 4e3393a969a0bdaae83263918b6824b2d08c3996 Mon Sep 17 00:00:00 2001 From: Andrew Scull Date: Tue, 15 Sep 2020 11:46:36 +0100 Subject: [PATCH 12/19] KVM: arm64: nVHE: Switch to hyp context for EL2 Save and restore the host context when switching to and from hyp. This gives hyp its own context that the host will not see as a step towards a full trust boundary between the two. SP_EL0 and pointer authentication keys are currently shared between the host and hyp so don't need to be switched yet. 
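The hypercall ABI itself is unchanged by this patch: the host places a kernel-VA function pointer in x0 and up to three arguments in x1-x3, with the return value coming back in x0, which is exactly what the new handle_trap() below implements in C. A hedged sketch of an equivalent host-side call, for reference:

	/* The kvm_call_hyp_ret() macro marshals the function pointer into
	 * x0 and the vcpu argument into x1 before issuing the HVC. */
	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);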
Signed-off-by: Andrew Scull Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200915104643.2543892-13-ascull@google.com --- arch/arm64/kvm/hyp/include/hyp/switch.h | 2 + arch/arm64/kvm/hyp/nvhe/Makefile | 2 +- arch/arm64/kvm/hyp/nvhe/host.S | 68 ++++++++++++++++++------- arch/arm64/kvm/hyp/nvhe/hyp-main.c | 36 +++++++++++++ 4 files changed, 89 insertions(+), 19 deletions(-) create mode 100644 arch/arm64/kvm/hyp/nvhe/hyp-main.c diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 821721b78ad9..4536b50ddc06 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -372,6 +372,8 @@ static inline bool esr_is_ptrauth_trap(u32 esr) ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val; \ } while(0) +DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt); + static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu) { struct kvm_cpu_context *ctxt; diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile index ddf98eb07b9d..46c89e8c30bc 100644 --- a/arch/arm64/kvm/hyp/nvhe/Makefile +++ b/arch/arm64/kvm/hyp/nvhe/Makefile @@ -6,7 +6,7 @@ asflags-y := -D__KVM_NVHE_HYPERVISOR__ ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o +obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o hyp-main.o obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \ ../fpsimd.o ../hyp-entry.o diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S index 9ab7814e6114..d26e41773dc4 100644 --- a/arch/arm64/kvm/hyp/nvhe/host.S +++ b/arch/arm64/kvm/hyp/nvhe/host.S @@ -12,6 +12,55 @@ .text +SYM_FUNC_START(__host_exit) + stp x0, x1, [sp, #-16]! + + get_host_ctxt x0, x1 + + ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN) + + /* Store the host regs x2 and x3 */ + stp x2, x3, [x0, #CPU_XREG_OFFSET(2)] + + /* Retrieve the host regs x0-x1 from the stack */ + ldp x2, x3, [sp], #16 // x0, x1 + + /* Store the host regs x0-x1 and x4-x17 */ + stp x2, x3, [x0, #CPU_XREG_OFFSET(0)] + stp x4, x5, [x0, #CPU_XREG_OFFSET(4)] + stp x6, x7, [x0, #CPU_XREG_OFFSET(6)] + stp x8, x9, [x0, #CPU_XREG_OFFSET(8)] + stp x10, x11, [x0, #CPU_XREG_OFFSET(10)] + stp x12, x13, [x0, #CPU_XREG_OFFSET(12)] + stp x14, x15, [x0, #CPU_XREG_OFFSET(14)] + stp x16, x17, [x0, #CPU_XREG_OFFSET(16)] + + /* Store the host regs x18-x29, lr */ + save_callee_saved_regs x0 + + /* Save the host context pointer in x29 across the function call */ + mov x29, x0 + bl handle_trap + + /* Restore host regs x0-x17 */ + ldp x0, x1, [x29, #CPU_XREG_OFFSET(0)] + ldp x2, x3, [x29, #CPU_XREG_OFFSET(2)] + ldp x4, x5, [x29, #CPU_XREG_OFFSET(4)] + ldp x6, x7, [x29, #CPU_XREG_OFFSET(6)] + ldp x8, x9, [x29, #CPU_XREG_OFFSET(8)] + ldp x10, x11, [x29, #CPU_XREG_OFFSET(10)] + ldp x12, x13, [x29, #CPU_XREG_OFFSET(12)] + ldp x14, x15, [x29, #CPU_XREG_OFFSET(14)] + ldp x16, x17, [x29, #CPU_XREG_OFFSET(16)] + + /* Restore host regs x18-x29, lr */ + restore_callee_saved_regs x29 + + /* Do not touch any register after this! 
*/ + eret + sb +SYM_FUNC_END(__host_exit) + SYM_FUNC_START(__hyp_do_panic) mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ PSR_MODE_EL1h) @@ -34,7 +83,7 @@ SYM_FUNC_END(__hyp_do_panic) /* Check for a stub HVC call */ cmp x0, #HVC_STUB_HCALL_NR - b.hs 1f + b.hs __host_exit /* * Compute the idmap address of __kvm_handle_stub_hvc and @@ -50,23 +99,6 @@ SYM_FUNC_END(__hyp_do_panic) /* x5 = __pa(x5) */ sub x5, x5, x6 br x5 - -1: - /* - * Shuffle the parameters before calling the function - * pointed to in x0. Assumes parameters in x[1,2,3]. - */ - kern_hyp_va x0 - str lr, [sp, #-16]! - mov lr, x0 - mov x0, x1 - mov x1, x2 - mov x2, x3 - blr lr - ldr lr, [sp], #16 - - eret - sb .L__vect_end\@: .if ((.L__vect_end\@ - .L__vect_start\@) > 0x80) .error "host_el1_sync_vect larger than vector entry" diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c new file mode 100644 index 000000000000..570c3896f42e --- /dev/null +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 - Google Inc + * Author: Andrew Scull + */ + +#include + +#include +#include +#include +#include +#include + +typedef unsigned long (*hypcall_fn_t) + (unsigned long, unsigned long, unsigned long); + +void handle_trap(struct kvm_cpu_context *host_ctxt) +{ + u64 esr = read_sysreg_el2(SYS_ESR); + hypcall_fn_t func; + unsigned long ret; + + if (ESR_ELx_EC(esr) != ESR_ELx_EC_HVC64) + hyp_panic(); + + /* + * __kvm_call_hyp takes a pointer in the host address space and + * up to three arguments. + */ + func = (hypcall_fn_t)kern_hyp_va(host_ctxt->regs.regs[0]); + ret = func(host_ctxt->regs.regs[1], + host_ctxt->regs.regs[2], + host_ctxt->regs.regs[3]); + host_ctxt->regs.regs[0] = ret; +} From a2e102e20fd69ce26ea9fe7bd0df26d25dafff59 Mon Sep 17 00:00:00 2001 From: Andrew Scull Date: Tue, 15 Sep 2020 11:46:37 +0100 Subject: [PATCH 13/19] KVM: arm64: nVHE: Handle hyp panics Restore the host context when panicking from hyp to give the best chance of the panic being clean. The host requires that registers be preserved such as x18 for the shadow callstack. If the panic is caused by an exception from EL1, the host context is still valid so the panic can return straight back to the host. If the panic comes from EL2 then it's most likely that the hyp context is active and the host context needs to be restored. There are windows, before the host context has been saved and after it has been restored, in which restoration is attempted incorrectly and the panic won't be clean.
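The decision reduces to the following C model (illustrative only; the real logic is the __hyp_do_panic assembly below, and the helper names here are invented for clarity):

	/* restore_host is false when the exception came from EL1, where the
	 * host's registers are still live and need no restoration. */
	if (restore_host)
		restore_saved_host_regs();	/* i.e. __host_enter_for_panic */
	eret_into_host_panic();			/* panic() then runs in the host */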
Signed-off-by: Andrew Scull Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200915104643.2543892-14-ascull@google.com --- arch/arm64/include/asm/kvm_hyp.h | 2 +- arch/arm64/kvm/hyp/nvhe/host.S | 76 ++++++++++++++++++++++++-------- arch/arm64/kvm/hyp/nvhe/switch.c | 18 +++----- 3 files changed, 63 insertions(+), 33 deletions(-) diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 0b525e05e5bf..6b664de5ec1f 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -94,7 +94,7 @@ u64 __guest_enter(struct kvm_vcpu *vcpu); void __noreturn hyp_panic(void); #ifdef __KVM_NVHE_HYPERVISOR__ -void __noreturn __hyp_do_panic(unsigned long, ...); +void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par); #endif diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S index d26e41773dc4..ff9a0f547b9f 100644 --- a/arch/arm64/kvm/hyp/nvhe/host.S +++ b/arch/arm64/kvm/hyp/nvhe/host.S @@ -47,6 +47,9 @@ SYM_FUNC_START(__host_exit) ldp x2, x3, [x29, #CPU_XREG_OFFSET(2)] ldp x4, x5, [x29, #CPU_XREG_OFFSET(4)] ldp x6, x7, [x29, #CPU_XREG_OFFSET(6)] + + /* x0-7 are used for panic arguments */ +__host_enter_for_panic: ldp x8, x9, [x29, #CPU_XREG_OFFSET(8)] ldp x10, x11, [x29, #CPU_XREG_OFFSET(10)] ldp x12, x13, [x29, #CPU_XREG_OFFSET(12)] @@ -57,18 +60,38 @@ SYM_FUNC_START(__host_exit) restore_callee_saved_regs x29 /* Do not touch any register after this! */ +__host_enter_without_restoring: eret sb SYM_FUNC_END(__host_exit) +/* + * void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par); + */ SYM_FUNC_START(__hyp_do_panic) + /* Load the format arguments into x1-7 */ + mov x6, x3 + get_vcpu_ptr x7, x3 + + mrs x3, esr_el2 + mrs x4, far_el2 + mrs x5, hpfar_el2 + + /* Prepare and exit to the host's panic function. */ mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ PSR_MODE_EL1h) msr spsr_el2, lr ldr lr, =panic msr elr_el2, lr - eret - sb + + /* + * Set the panic format string and enter the host, conditionally + * restoring the host context. + */ + cmp x0, xzr + ldr x0, =__hyp_panic_string + b.eq __host_enter_without_restoring + b __host_enter_for_panic SYM_FUNC_END(__hyp_do_panic) .macro host_el1_sync_vect @@ -79,7 +102,7 @@ SYM_FUNC_END(__hyp_do_panic) lsr x0, x0, #ESR_ELx_EC_SHIFT cmp x0, #ESR_ELx_EC_HVC64 ldp x0, x1, [sp], #16 - b.ne hyp_panic + b.ne __host_exit /* Check for a stub HVC call */ cmp x0, #HVC_STUB_HCALL_NR @@ -105,16 +128,31 @@ SYM_FUNC_END(__hyp_do_panic) .endif .endm -.macro invalid_host_vect +.macro invalid_host_el2_vect .align 7 /* If a guest is loaded, panic out of it. */ stp x0, x1, [sp, #-16]! get_loaded_vcpu x0, x1 cbnz x0, __guest_exit_panic add sp, sp, #16 + + /* + * The panic may not be clean if the exception is taken before the host + * context has been saved by __host_exit or after the hyp context has + * been partially clobbered by __host_enter. + */ b hyp_panic .endm +.macro invalid_host_el1_vect + .align 7 + mov x0, xzr /* restore_host = false */ + mrs x1, spsr_el2 + mrs x2, elr_el2 + mrs x3, par_el1 + b __hyp_do_panic +.endm + /* * The host vector does not use an ESB instruction in order to avoid consuming * SErrors that should only be consumed by the host.
Guest entry is deferred by @@ -127,23 +165,23 @@ SYM_FUNC_END(__hyp_do_panic) */ .align 11 SYM_CODE_START(__kvm_hyp_host_vector) - invalid_host_vect // Synchronous EL2t - invalid_host_vect // IRQ EL2t - invalid_host_vect // FIQ EL2t - invalid_host_vect // Error EL2t + invalid_host_el2_vect // Synchronous EL2t + invalid_host_el2_vect // IRQ EL2t + invalid_host_el2_vect // FIQ EL2t + invalid_host_el2_vect // Error EL2t - invalid_host_vect // Synchronous EL2h - invalid_host_vect // IRQ EL2h - invalid_host_vect // FIQ EL2h - invalid_host_vect // Error EL2h + invalid_host_el2_vect // Synchronous EL2h + invalid_host_el2_vect // IRQ EL2h + invalid_host_el2_vect // FIQ EL2h + invalid_host_el2_vect // Error EL2h host_el1_sync_vect // Synchronous 64-bit EL1 - invalid_host_vect // IRQ 64-bit EL1 - invalid_host_vect // FIQ 64-bit EL1 - invalid_host_vect // Error 64-bit EL1 + invalid_host_el1_vect // IRQ 64-bit EL1 + invalid_host_el1_vect // FIQ 64-bit EL1 + invalid_host_el1_vect // Error 64-bit EL1 - invalid_host_vect // Synchronous 32-bit EL1 - invalid_host_vect // IRQ 32-bit EL1 - invalid_host_vect // FIQ 32-bit EL1 - invalid_host_vect // Error 32-bit EL1 + invalid_host_el1_vect // Synchronous 32-bit EL1 + invalid_host_el1_vect // IRQ 32-bit EL1 + invalid_host_el1_vect // FIQ 32-bit EL1 + invalid_host_el1_vect // Error 32-bit EL1 SYM_CODE_END(__kvm_hyp_host_vector) diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index c99945cda779..29febf9a93f2 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -242,6 +242,8 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) if (system_uses_irq_prio_masking()) gic_write_pmr(GIC_PRIO_IRQOFF); + host_ctxt->__hyp_running_vcpu = NULL; + return exit_code; } @@ -250,31 +252,21 @@ void __noreturn hyp_panic(void) u64 spsr = read_sysreg_el2(SYS_SPSR); u64 elr = read_sysreg_el2(SYS_ELR); u64 par = read_sysreg(par_el1); + bool restore_host = true; struct kvm_cpu_context *host_ctxt; struct kvm_vcpu *vcpu; - unsigned long str_va; host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; vcpu = host_ctxt->__hyp_running_vcpu; - if (read_sysreg(vttbr_el2)) { + if (vcpu) { __timer_disable_traps(vcpu); __deactivate_traps(vcpu); __load_host_stage2(); __sysreg_restore_state_nvhe(host_ctxt); } - /* - * Force the panic string to be loaded from the literal pool, - * making sure it is a kernel address and not a PC-relative - * reference. - */ - asm volatile("ldr %0, =%1" : "=r" (str_va) : "S" (__hyp_panic_string)); - - __hyp_do_panic(str_va, - spsr, elr, - read_sysreg(esr_el2), read_sysreg_el2(SYS_FAR), - read_sysreg(hpfar_el2), par, vcpu); + __hyp_do_panic(restore_host, spsr, elr, par); unreachable(); } From 5dc33bd199ca3b9f53c28144fa9a20b43c908972 Mon Sep 17 00:00:00 2001 From: Andrew Scull Date: Tue, 15 Sep 2020 11:46:38 +0100 Subject: [PATCH 14/19] KVM: arm64: nVHE: Pass pointers consistently to hyp-init Rather than some being kernel pointer and others being hyp pointers, standardize on all pointers being hyp pointers. 
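Concretely, the host now applies the kernel-VA to hyp-VA conversion to everything it passes, so hyp-init can use its arguments directly (a sketch of the resulting call-site values, matching the diff below):

	/* Both values handed to __kvm_hyp_init are already hyp VAs. */
	hyp_stack_ptr = kern_hyp_va(hyp_stack_ptr);
	vector_ptr = (unsigned long)kern_hyp_va(kvm_ksym_ref(__kvm_hyp_host_vector));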
Signed-off-by: Andrew Scull Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200915104643.2543892-15-ascull@google.com --- arch/arm64/kvm/arm.c | 1 + arch/arm64/kvm/hyp/nvhe/hyp-init.S | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index ae4b34f91e94..6b7180072c8d 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1278,6 +1278,7 @@ static void cpu_init_hyp_mode(void) pgd_ptr = kvm_mmu_get_httbr(); hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE; + hyp_stack_ptr = kern_hyp_va(hyp_stack_ptr); vector_ptr = (unsigned long)kern_hyp_va(kvm_ksym_ref(__kvm_hyp_host_vector)); /* diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S index d9434e90c06d..abe885e26fe2 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S +++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S @@ -114,7 +114,6 @@ alternative_else_nop_endif isb /* Set the stack and new vectors */ - kern_hyp_va x1 mov sp, x1 msr vbar_el2, x2 From cf6501689012ef346cc835a38cbebb1e398d0834 Mon Sep 17 00:00:00 2001 From: Andrew Scull Date: Tue, 15 Sep 2020 11:46:39 +0100 Subject: [PATCH 15/19] smccc: Define vendor hyp owned service call region Vendor specific hypervisor services have their own region of function identifiers reserved by SMCCC. Extend the list of owners to include this case. Signed-off-by: Andrew Scull Signed-off-by: Marc Zyngier Cc: Sudeep Holla Link: https://lore.kernel.org/r/20200915104643.2543892-16-ascull@google.com --- include/linux/arm-smccc.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 15c706fb0a37..ee286f5de239 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -49,6 +49,7 @@ #define ARM_SMCCC_OWNER_OEM 3 #define ARM_SMCCC_OWNER_STANDARD 4 #define ARM_SMCCC_OWNER_STANDARD_HYP 5 +#define ARM_SMCCC_OWNER_VENDOR_HYP 6 #define ARM_SMCCC_OWNER_TRUSTED_APP 48 #define ARM_SMCCC_OWNER_TRUSTED_APP_END 49 #define ARM_SMCCC_OWNER_TRUSTED_OS 50 From 0794a974d74dc777a212a3c58dd236f507360348 Mon Sep 17 00:00:00 2001 From: Andrew Scull Date: Tue, 15 Sep 2020 11:46:40 +0100 Subject: [PATCH 16/19] smccc: Use separate variables for args and results Using the same register-bound variable for both arguments and results means these values share a type. That type must allow the arguments to be assigned to it and must also be assignable to the unsigned long fields of struct arm_smccc_res. This restriction on types causes compiler warnings when the argument cannot be implicitly assigned to an unsigned long, for example the pointers that are used in the KVM hyp interface. By separating the arguments and results into their own variables, the type constraint is lifted allowing the arguments to avoid the need for any type conversion. Signed-off-by: Andrew Scull Signed-off-by: Marc Zyngier Cc: Sudeep Holla Link: https://lore.kernel.org/r/20200915104643.2543892-17-ascull@google.com --- include/linux/arm-smccc.h | 73 ++++++++++++++++----------------------- 1 file changed, 29 insertions(+), 44 deletions(-) diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index ee286f5de239..885c9ffc835c 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -228,87 +228,67 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, #define __count_args(...) 
\ ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0) -#define __constraint_write_0 \ - "+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3) -#define __constraint_write_1 \ - "+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3) -#define __constraint_write_2 \ - "+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3) -#define __constraint_write_3 \ - "+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3) -#define __constraint_write_4 __constraint_write_3 -#define __constraint_write_5 __constraint_write_4 -#define __constraint_write_6 __constraint_write_5 -#define __constraint_write_7 __constraint_write_6 - -#define __constraint_read_0 -#define __constraint_read_1 -#define __constraint_read_2 -#define __constraint_read_3 -#define __constraint_read_4 "r" (r4) -#define __constraint_read_5 __constraint_read_4, "r" (r5) -#define __constraint_read_6 __constraint_read_5, "r" (r6) -#define __constraint_read_7 __constraint_read_6, "r" (r7) +#define __constraint_read_0 "r" (arg0) +#define __constraint_read_1 __constraint_read_0, "r" (arg1) +#define __constraint_read_2 __constraint_read_1, "r" (arg2) +#define __constraint_read_3 __constraint_read_2, "r" (arg3) +#define __constraint_read_4 __constraint_read_3, "r" (arg4) +#define __constraint_read_5 __constraint_read_4, "r" (arg5) +#define __constraint_read_6 __constraint_read_5, "r" (arg6) +#define __constraint_read_7 __constraint_read_6, "r" (arg7) #define __declare_arg_0(a0, res) \ struct arm_smccc_res *___res = res; \ - register unsigned long r0 asm("r0") = (u32)a0; \ - register unsigned long r1 asm("r1"); \ - register unsigned long r2 asm("r2"); \ - register unsigned long r3 asm("r3") + register unsigned long arg0 asm("r0") = (u32)a0 #define __declare_arg_1(a0, a1, res) \ typeof(a1) __a1 = a1; \ struct arm_smccc_res *___res = res; \ - register unsigned long r0 asm("r0") = (u32)a0; \ - register unsigned long r1 asm("r1") = __a1; \ - register unsigned long r2 asm("r2"); \ - register unsigned long r3 asm("r3") + register unsigned long arg0 asm("r0") = (u32)a0; \ + register typeof(a1) arg1 asm("r1") = __a1 #define __declare_arg_2(a0, a1, a2, res) \ typeof(a1) __a1 = a1; \ typeof(a2) __a2 = a2; \ struct arm_smccc_res *___res = res; \ - register unsigned long r0 asm("r0") = (u32)a0; \ - register unsigned long r1 asm("r1") = __a1; \ - register unsigned long r2 asm("r2") = __a2; \ - register unsigned long r3 asm("r3") + register unsigned long arg0 asm("r0") = (u32)a0; \ + register typeof(a1) arg1 asm("r1") = __a1; \ + register typeof(a2) arg2 asm("r2") = __a2 #define __declare_arg_3(a0, a1, a2, a3, res) \ typeof(a1) __a1 = a1; \ typeof(a2) __a2 = a2; \ typeof(a3) __a3 = a3; \ struct arm_smccc_res *___res = res; \ - register unsigned long r0 asm("r0") = (u32)a0; \ - register unsigned long r1 asm("r1") = __a1; \ - register unsigned long r2 asm("r2") = __a2; \ - register unsigned long r3 asm("r3") = __a3 + register unsigned long arg0 asm("r0") = (u32)a0; \ + register typeof(a1) arg1 asm("r1") = __a1; \ + register typeof(a2) arg2 asm("r2") = __a2; \ + register typeof(a3) arg3 asm("r3") = __a3 #define __declare_arg_4(a0, a1, a2, a3, a4, res) \ typeof(a4) __a4 = a4; \ __declare_arg_3(a0, a1, a2, a3, res); \ - register unsigned long r4 asm("r4") = __a4 + register typeof(a4) arg4 asm("r4") = __a4 #define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \ typeof(a5) __a5 = a5; \ __declare_arg_4(a0, a1, a2, a3, a4, res); \ - register unsigned long r5 asm("r5") = __a5 + register typeof(a5) arg5 asm("r5") = __a5 #define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \ typeof(a6) __a6 = a6; \ 
__declare_arg_5(a0, a1, a2, a3, a4, a5, res); \ - register unsigned long r6 asm("r6") = __a6 + register typeof(a6) arg6 asm("r6") = __a6 #define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \ typeof(a7) __a7 = a7; \ __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \ - register unsigned long r7 asm("r7") = __a7 + register typeof(a7) arg7 asm("r7") = __a7 #define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__) #define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__) #define ___constraints(count) \ - : __constraint_write_ ## count \ : __constraint_read_ ## count \ : "memory" #define __constraints(count) ___constraints(count) @@ -320,8 +300,13 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, */ #define __arm_smccc_1_1(inst, ...) \ do { \ + register unsigned long r0 asm("r0"); \ + register unsigned long r1 asm("r1"); \ + register unsigned long r2 asm("r2"); \ + register unsigned long r3 asm("r3"); \ __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \ - asm volatile(inst "\n" \ + asm volatile(inst "\n" : \ + "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3) \ __constraints(__count_args(__VA_ARGS__))); \ if (___res) \ *___res = (typeof(*___res)){r0, r1, r2, r3}; \ @@ -367,7 +352,7 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, #define __fail_smccc_1_1(...) \ do { \ __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \ - asm ("" __constraints(__count_args(__VA_ARGS__))); \ + asm ("" : __constraints(__count_args(__VA_ARGS__))); \ if (___res) \ ___res->a0 = SMCCC_RET_NOT_SUPPORTED; \ } while (0) From 054698316d87a13e4c0dc8f5ecdd77b12e5eb676 Mon Sep 17 00:00:00 2001 From: Andrew Scull Date: Tue, 15 Sep 2020 11:46:41 +0100 Subject: [PATCH 17/19] KVM: arm64: nVHE: Migrate hyp interface to SMCCC Rather than passing arbitrary function pointers to run at hyp, define an equivalent set of SMCCC functions. Since the SMCCC functions are strongly tied to the original function prototypes, it is not expected for the host to ever call an invalid ID, but a warning is raised if it ever does. As __kvm_vcpu_run is used for every switch between the host and a guest, it is explicitly singled out to be identified before the other function IDs to improve the performance of the hot path.
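To make the new convention concrete, a host-side hyp call becomes an SMCCC fast call whose function ID encodes the callee, with the status returned in a0 and a single return value in a1. A minimal sketch of a caller, mirroring the kvm_call_hyp_nvhe() macro in the diff below (__kvm_get_mdcr_el2 chosen purely as an example):

	struct arm_smccc_res res;
	u64 mdcr;

	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_get_mdcr_el2), &res);
	WARN_ON(res.a0 != SMCCC_RET_SUCCESS);	/* the warning described above */
	mdcr = res.a1;				/* return value travels in a1 */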
Signed-off-by: Andrew Scull Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200915104643.2543892-18-ascull@google.com --- arch/arm64/include/asm/kvm_asm.h | 24 +++++++ arch/arm64/include/asm/kvm_host.h | 25 ++++--- arch/arm64/kvm/arm.c | 2 +- arch/arm64/kvm/hyp.S | 24 ++----- arch/arm64/kvm/hyp/nvhe/hyp-main.c | 107 +++++++++++++++++++++++++---- 5 files changed, 139 insertions(+), 43 deletions(-) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 4e3073858346..3e4577013d33 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -38,6 +38,30 @@ #define __SMCCC_WORKAROUND_1_SMC_SZ 36 +#define KVM_HOST_SMCCC_ID(id) \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_VENDOR_HYP, \ + (id)) + +#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name) + +#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init 0 +#define __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run 1 +#define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context 2 +#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa 3 +#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid 4 +#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_local_vmid 5 +#define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff 6 +#define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs 7 +#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_ich_vtr_el2 8 +#define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr 9 +#define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr 10 +#define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs 11 +#define __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 12 +#define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs 13 +#define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs 14 + #ifndef __ASSEMBLY__ #include diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 16adbefde1cc..82c941cf8890 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -11,6 +11,7 @@ #ifndef __ARM64_KVM_HOST_H__ #define __ARM64_KVM_HOST_H__ +#include #include #include #include @@ -479,18 +480,20 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); void kvm_arm_halt_guest(struct kvm *kvm); void kvm_arm_resume_guest(struct kvm *kvm); -u64 __kvm_call_hyp(void *hypfn, ...); +u64 __kvm_call_hyp_init(phys_addr_t pgd_ptr, + unsigned long hyp_stack_ptr, + unsigned long vector_ptr, + unsigned long tpidr_el2); -#define kvm_call_hyp_nvhe(f, ...) \ - do { \ - DECLARE_KVM_NVHE_SYM(f); \ - __kvm_call_hyp(kvm_ksym_ref_nvhe(f), ##__VA_ARGS__); \ - } while(0) - -#define kvm_call_hyp_nvhe_ret(f, ...) \ +#define kvm_call_hyp_nvhe(f, ...) \ ({ \ - DECLARE_KVM_NVHE_SYM(f); \ - __kvm_call_hyp(kvm_ksym_ref_nvhe(f), ##__VA_ARGS__); \ + struct arm_smccc_res res; \ + \ + arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f), \ + ##__VA_ARGS__, &res); \ + WARN_ON(res.a0 != SMCCC_RET_SUCCESS); \ + \ + res.a1; \ }) /* @@ -516,7 +519,7 @@ u64 __kvm_call_hyp(void *hypfn, ...); ret = f(__VA_ARGS__); \ isb(); \ } else { \ - ret = kvm_call_hyp_nvhe_ret(f, ##__VA_ARGS__); \ + ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__); \ } \ \ ret; \ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 6b7180072c8d..49aa08bd26de 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1288,7 +1288,7 @@ static void cpu_init_hyp_mode(void) * cpus_have_const_cap() wrapper. 
*/ BUG_ON(!system_capabilities_finalized()); - __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2); + __kvm_call_hyp_init(pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2); /* * Disabling SSBD on a non-VHE system requires us to enable SSBS diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index 3c79a1124af2..12aa426f7559 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -11,24 +11,12 @@ #include /* - * u64 __kvm_call_hyp(void *hypfn, ...); - * - * This is not really a variadic function in the classic C-way and care must - * be taken when calling this to ensure parameters are passed in registers - * only, since the stack will change between the caller and the callee. - * - * Call the function with the first argument containing a pointer to the - * function you wish to call in Hyp mode, and subsequent arguments will be - * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the - * function pointer can be passed). The function being called must be mapped - * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are - * passed in x0. - * - * A function pointer with a value less than 0xfff has a special meaning, - * and is used to implement hyp stubs in the same way as in - * arch/arm64/kernel/hyp_stub.S. + * u64 __kvm_call_hyp_init(phys_addr_t pgd_ptr, + * unsigned long hyp_stack_ptr, + * unsigned long vector_ptr, + * unsigned long tpidr_el2); */ -SYM_FUNC_START(__kvm_call_hyp) +SYM_FUNC_START(__kvm_call_hyp_init) hvc #0 ret -SYM_FUNC_END(__kvm_call_hyp) +SYM_FUNC_END(__kvm_call_hyp_init) diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index 570c3896f42e..41aaf038599a 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -12,25 +12,106 @@ #include #include -typedef unsigned long (*hypcall_fn_t) - (unsigned long, unsigned long, unsigned long); +#include + +static void handle_host_hcall(unsigned long func_id, + struct kvm_cpu_context *host_ctxt) +{ + unsigned long ret = 0; + + switch (func_id) { + case KVM_HOST_SMCCC_FUNC(__kvm_vcpu_run): { + unsigned long r1 = host_ctxt->regs.regs[1]; + struct kvm_vcpu *vcpu = (struct kvm_vcpu *)r1; + + ret = __kvm_vcpu_run(vcpu); + break; + } + case KVM_HOST_SMCCC_FUNC(__kvm_flush_vm_context): + __kvm_flush_vm_context(); + break; + case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_vmid_ipa): { + unsigned long r1 = host_ctxt->regs.regs[1]; + struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)r1; + phys_addr_t ipa = host_ctxt->regs.regs[2]; + int level = host_ctxt->regs.regs[3]; + + __kvm_tlb_flush_vmid_ipa(mmu, ipa, level); + break; + } + case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_vmid): { + unsigned long r1 = host_ctxt->regs.regs[1]; + struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)r1; + + __kvm_tlb_flush_vmid(mmu); + break; + } + case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_local_vmid): { + unsigned long r1 = host_ctxt->regs.regs[1]; + struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)r1; + + __kvm_tlb_flush_local_vmid(mmu); + break; + } + case KVM_HOST_SMCCC_FUNC(__kvm_timer_set_cntvoff): { + u64 cntvoff = host_ctxt->regs.regs[1]; + + __kvm_timer_set_cntvoff(cntvoff); + break; + } + case KVM_HOST_SMCCC_FUNC(__kvm_enable_ssbs): + __kvm_enable_ssbs(); + break; + case KVM_HOST_SMCCC_FUNC(__vgic_v3_get_ich_vtr_el2): + ret = __vgic_v3_get_ich_vtr_el2(); + break; + case KVM_HOST_SMCCC_FUNC(__vgic_v3_read_vmcr): + ret = __vgic_v3_read_vmcr(); + break; + case KVM_HOST_SMCCC_FUNC(__vgic_v3_write_vmcr): { + u32 vmcr = host_ctxt->regs.regs[1]; + + 
__vgic_v3_write_vmcr(vmcr); + break; + } + case KVM_HOST_SMCCC_FUNC(__vgic_v3_init_lrs): + __vgic_v3_init_lrs(); + break; + case KVM_HOST_SMCCC_FUNC(__kvm_get_mdcr_el2): + ret = __kvm_get_mdcr_el2(); + break; + case KVM_HOST_SMCCC_FUNC(__vgic_v3_save_aprs): { + unsigned long r1 = host_ctxt->regs.regs[1]; + struct vgic_v3_cpu_if *cpu_if = (struct vgic_v3_cpu_if *)r1; + + __vgic_v3_save_aprs(cpu_if); + break; + } + case KVM_HOST_SMCCC_FUNC(__vgic_v3_restore_aprs): { + unsigned long r1 = host_ctxt->regs.regs[1]; + struct vgic_v3_cpu_if *cpu_if = (struct vgic_v3_cpu_if *)r1; + + __vgic_v3_restore_aprs(cpu_if); + break; + } + default: + /* Invalid host HVC. */ + host_ctxt->regs.regs[0] = SMCCC_RET_NOT_SUPPORTED; + return; + } + + host_ctxt->regs.regs[0] = SMCCC_RET_SUCCESS; + host_ctxt->regs.regs[1] = ret; +} void handle_trap(struct kvm_cpu_context *host_ctxt) { u64 esr = read_sysreg_el2(SYS_ESR); - hypcall_fn_t func; - unsigned long ret; + unsigned long func_id; if (ESR_ELx_EC(esr) != ESR_ELx_EC_HVC64) hyp_panic(); - /* - * __kvm_call_hyp takes a pointer in the host address space and - * up to three arguments. - */ - func = (hypcall_fn_t)kern_hyp_va(host_ctxt->regs.regs[0]); - ret = func(host_ctxt->regs.regs[1], - host_ctxt->regs.regs[2], - host_ctxt->regs.regs[3]); - host_ctxt->regs.regs[0] = ret; + func_id = host_ctxt->regs.regs[0]; + handle_host_hcall(func_id, host_ctxt); } From 04e4caa8d355d19fd7d26734bd2b9e3b563bb22a Mon Sep 17 00:00:00 2001 From: Andrew Scull Date: Tue, 15 Sep 2020 11:46:42 +0100 Subject: [PATCH 18/19] KVM: arm64: nVHE: Migrate hyp-init to SMCCC To complete the transition to SMCCC, the hyp initialization is given a function ID. This looks neater than comparing the hyp stub function IDs to the page table physical address. Some care is taken to only clobber x0-3 before the host context is saved, as only those registers can be clobbered according to SMCCC. Fortunately, only a few acrobatics are needed. The possible new tpidr_el2 is moved to the argument in x2 so that it can be stashed in tpidr_el2 early to free up a scratch register. The page table configuration then makes use of x0-2. Signed-off-by: Andrew Scull Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200915104643.2543892-19-ascull@google.com --- arch/arm64/include/asm/kvm_host.h | 5 --- arch/arm64/kvm/Makefile | 2 +- arch/arm64/kvm/arm.c | 5 ++- arch/arm64/kvm/hyp.S | 22 ---------- arch/arm64/kvm/hyp/nvhe/hyp-init.S | 70 +++++++++++++++++------------- 5 files changed, 45 insertions(+), 59 deletions(-) delete mode 100644 arch/arm64/kvm/hyp.S diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 82c941cf8890..ef0325c42ca0 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -480,11 +480,6 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); void kvm_arm_halt_guest(struct kvm *kvm); void kvm_arm_resume_guest(struct kvm *kvm); -u64 __kvm_call_hyp_init(phys_addr_t pgd_ptr, - unsigned long hyp_stack_ptr, - unsigned long vector_ptr, - unsigned long tpidr_el2); - #define kvm_call_hyp_nvhe(f, ...)
\ ({ \ struct arm_smccc_res res; \ diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 99977c1972cc..1504c81fbf5d 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -13,7 +13,7 @@ obj-$(CONFIG_KVM) += hyp/ kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \ $(KVM)/vfio.o $(KVM)/irqchip.o \ arm.o mmu.o mmio.o psci.o perf.o hypercalls.o pvtime.o \ - inject_fault.o regmap.o va_layout.o hyp.o handle_exit.o \ + inject_fault.o regmap.o va_layout.o handle_exit.o \ guest.o debug.o reset.o sys_regs.o \ vgic-sys-reg-v3.o fpsimd.o pmu.o \ aarch32.o arch_timer.o \ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 49aa08bd26de..c074d9824d54 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1264,6 +1264,7 @@ static void cpu_init_hyp_mode(void) unsigned long hyp_stack_ptr; unsigned long vector_ptr; unsigned long tpidr_el2; + struct arm_smccc_res res; /* Switch from the HYP stub to our own HYP init vector */ __hyp_set_vectors(kvm_get_idmap_vector()); @@ -1288,7 +1289,9 @@ static void cpu_init_hyp_mode(void) * cpus_have_const_cap() wrapper. */ BUG_ON(!system_capabilities_finalized()); - __kvm_call_hyp_init(pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2); + arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init), + pgd_ptr, tpidr_el2, hyp_stack_ptr, vector_ptr, &res); + WARN_ON(res.a0 != SMCCC_RET_SUCCESS); /* * Disabling SSBD on a non-VHE system requires us to enable SSBS diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S deleted file mode 100644 index 12aa426f7559..000000000000 --- a/arch/arm64/kvm/hyp.S +++ /dev/null @@ -1,22 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2012,2013 - ARM Ltd - * Author: Marc Zyngier - */ - -#include - -#include -#include -#include - -/* - * u64 __kvm_call_hyp_init(phys_addr_t pgd_ptr, - * unsigned long hyp_stack_ptr, - * unsigned long vector_ptr, - * unsigned long tpidr_el2); - */ -SYM_FUNC_START(__kvm_call_hyp_init) - hvc #0 - ret -SYM_FUNC_END(__kvm_call_hyp_init) diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S index abe885e26fe2..47224dc62c51 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S +++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S @@ -4,11 +4,13 @@ * Author: Marc Zyngier */ +#include #include #include #include #include +#include #include #include #include @@ -44,27 +46,37 @@ __invalid: b . /* - * x0: HYP pgd - * x1: HYP stack - * x2: HYP vectors - * x3: per-CPU offset + * x0: SMCCC function ID + * x1: HYP pgd + * x2: per-CPU offset + * x3: HYP stack + * x4: HYP vectors */ __do_hyp_init: /* Check for a stub HVC call */ cmp x0, #HVC_STUB_HCALL_NR b.lo __kvm_handle_stub_hvc - phys_to_ttbr x4, x0 -alternative_if ARM64_HAS_CNP - orr x4, x4, #TTBR_CNP_BIT -alternative_else_nop_endif - msr ttbr0_el2, x4 + /* Set tpidr_el2 for use by HYP to free a register */ + msr tpidr_el2, x2 - mrs x4, tcr_el1 - mov_q x5, TCR_EL2_MASK - and x4, x4, x5 - mov x5, #TCR_EL2_RES1 - orr x4, x4, x5 + mov x2, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) + cmp x0, x2 + b.eq 1f + mov x0, #SMCCC_RET_NOT_SUPPORTED + eret + +1: phys_to_ttbr x0, x1 +alternative_if ARM64_HAS_CNP + orr x0, x0, #TTBR_CNP_BIT +alternative_else_nop_endif + msr ttbr0_el2, x0 + + mrs x0, tcr_el1 + mov_q x1, TCR_EL2_MASK + and x0, x0, x1 + mov x1, #TCR_EL2_RES1 + orr x0, x0, x1 /* * The ID map may be configured to use an extended virtual address @@ -80,18 +92,18 @@ alternative_else_nop_endif * * So use the same T0SZ value we use for the ID map. 
*/ - ldr_l x5, idmap_t0sz - bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH + ldr_l x1, idmap_t0sz + bfi x0, x1, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH /* * Set the PS bits in TCR_EL2. */ - tcr_compute_pa_size x4, #TCR_EL2_PS_SHIFT, x5, x6 + tcr_compute_pa_size x0, #TCR_EL2_PS_SHIFT, x1, x2 - msr tcr_el2, x4 + msr tcr_el2, x0 - mrs x4, mair_el1 - msr mair_el2, x4 + mrs x0, mair_el1 + msr mair_el2, x0 isb /* Invalidate the stale TLBs from Bootloader */ @@ -103,24 +115,22 @@ alternative_else_nop_endif * as well as the EE bit on BE. Drop the A flag since the compiler * is allowed to generate unaligned accesses. */ - mov_q x4, (SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A)) -CPU_BE( orr x4, x4, #SCTLR_ELx_EE) + mov_q x0, (SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A)) +CPU_BE( orr x0, x0, #SCTLR_ELx_EE) alternative_if ARM64_HAS_ADDRESS_AUTH - mov_q x5, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \ + mov_q x1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \ SCTLR_ELx_ENDA | SCTLR_ELx_ENDB) - orr x4, x4, x5 + orr x0, x0, x1 alternative_else_nop_endif - msr sctlr_el2, x4 + msr sctlr_el2, x0 isb /* Set the stack and new vectors */ - mov sp, x1 - msr vbar_el2, x2 - - /* Set tpidr_el2 for use by HYP */ - msr tpidr_el2, x3 + mov sp, x3 + msr vbar_el2, x4 /* Hello, World! */ + mov x0, #SMCCC_RET_SUCCESS eret SYM_CODE_END(__kvm_hyp_init) From a071261d93184f43b7dede7516b6c449c4ed5770 Mon Sep 17 00:00:00 2001 From: Andrew Scull Date: Tue, 15 Sep 2020 11:46:43 +0100 Subject: [PATCH 19/19] KVM: arm64: nVHE: Fix pointers during SMCCC conversion The host need not concern itself with the pointer differences for the hyp interfaces that are shared between VHE and nVHE, so leave it to the hyp to handle. As the SMCCC function IDs are converted into function calls, it is a suitable place to also convert any pointer arguments into hyp pointers. This, additionally, eases the reuse of the handlers in different contexts.
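The resulting pattern, sketched from the hyp-main.c hunks below: each handler case owns the kernel-VA to hyp-VA conversion, so the host passes plain kernel pointers and the callees only ever see hyp VAs. For example:

	case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_vmid): {
		struct kvm_s2_mmu *mmu =
			(struct kvm_s2_mmu *)host_ctxt->regs.regs[1];

		/* Convert at the interface boundary, not in the callee. */
		__kvm_tlb_flush_vmid(kern_hyp_va(mmu));
		break;
	}

This is also why the kern_hyp_va() calls disappear from tlb.c, switch.c and the vgic-v3 call sites.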
Signed-off-by: Andrew Scull Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200915104643.2543892-20-ascull@google.com --- arch/arm64/kvm/hyp/nvhe/hyp-main.c | 12 ++++++------ arch/arm64/kvm/hyp/nvhe/switch.c | 2 -- arch/arm64/kvm/hyp/nvhe/tlb.c | 2 -- arch/arm64/kvm/vgic/vgic-v3.c | 4 ++-- 4 files changed, 8 insertions(+), 12 deletions(-) diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index 41aaf038599a..e2eafe2c93af 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -24,7 +24,7 @@ static void handle_host_hcall(unsigned long func_id, unsigned long r1 = host_ctxt->regs.regs[1]; struct kvm_vcpu *vcpu = (struct kvm_vcpu *)r1; - ret = __kvm_vcpu_run(vcpu); + ret = __kvm_vcpu_run(kern_hyp_va(vcpu)); break; } case KVM_HOST_SMCCC_FUNC(__kvm_flush_vm_context): @@ -36,21 +36,21 @@ static void handle_host_hcall(unsigned long func_id, phys_addr_t ipa = host_ctxt->regs.regs[2]; int level = host_ctxt->regs.regs[3]; - __kvm_tlb_flush_vmid_ipa(mmu, ipa, level); + __kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level); break; } case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_vmid): { unsigned long r1 = host_ctxt->regs.regs[1]; struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)r1; - __kvm_tlb_flush_vmid(mmu); + __kvm_tlb_flush_vmid(kern_hyp_va(mmu)); break; } case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_local_vmid): { unsigned long r1 = host_ctxt->regs.regs[1]; struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)r1; - __kvm_tlb_flush_local_vmid(mmu); + __kvm_tlb_flush_local_vmid(kern_hyp_va(mmu)); break; } case KVM_HOST_SMCCC_FUNC(__kvm_timer_set_cntvoff): { @@ -84,14 +84,14 @@ static void handle_host_hcall(unsigned long func_id, unsigned long r1 = host_ctxt->regs.regs[1]; struct vgic_v3_cpu_if *cpu_if = (struct vgic_v3_cpu_if *)r1; - __vgic_v3_save_aprs(cpu_if); + __vgic_v3_save_aprs(kern_hyp_va(cpu_if)); break; } case KVM_HOST_SMCCC_FUNC(__vgic_v3_restore_aprs): { unsigned long r1 = host_ctxt->regs.regs[1]; struct vgic_v3_cpu_if *cpu_if = (struct vgic_v3_cpu_if *)r1; - __vgic_v3_restore_aprs(cpu_if); + __vgic_v3_restore_aprs(kern_hyp_va(cpu_if)); break; } default: diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 29febf9a93f2..a29f247f35e3 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -176,8 +176,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) pmr_sync(); } - vcpu = kern_hyp_va(vcpu); - host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; host_ctxt->__hyp_running_vcpu = vcpu; guest_ctxt = &vcpu->arch.ctxt; diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c index 69eae608d670..544bca3072b7 100644 --- a/arch/arm64/kvm/hyp/nvhe/tlb.c +++ b/arch/arm64/kvm/hyp/nvhe/tlb.c @@ -54,7 +54,6 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, dsb(ishst); /* Switch to requested VMID */ - mmu = kern_hyp_va(mmu); __tlb_switch_to_guest(mmu, &cxt); /* @@ -108,7 +107,6 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu) dsb(ishst); /* Switch to requested VMID */ - mmu = kern_hyp_va(mmu); __tlb_switch_to_guest(mmu, &cxt); __tlbi(vmalls12e1is); diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index 76e2d85789ed..9cdf39a94a63 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -662,7 +662,7 @@ void vgic_v3_load(struct kvm_vcpu *vcpu) if (likely(cpu_if->vgic_sre)) kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr); - kvm_call_hyp(__vgic_v3_restore_aprs, kern_hyp_va(cpu_if)); + 
kvm_call_hyp(__vgic_v3_restore_aprs, cpu_if); if (has_vhe()) __vgic_v3_activate_traps(cpu_if); @@ -686,7 +686,7 @@ void vgic_v3_put(struct kvm_vcpu *vcpu) vgic_v3_vmcr_sync(vcpu); - kvm_call_hyp(__vgic_v3_save_aprs, kern_hyp_va(cpu_if)); + kvm_call_hyp(__vgic_v3_save_aprs, cpu_if); if (has_vhe()) __vgic_v3_deactivate_traps(cpu_if);
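For reference, the end-to-end flow the series leaves behind, condensed from the patches above (an illustrative sketch using their names, not a literal excerpt):

	/* Host: every hyp call is now an SMCCC fast call, e.g. */
	kvm_call_hyp_nvhe(__kvm_tlb_flush_vmid, mmu);

	/* Hyp: the host vector saves context and funnels HVCs here */
	void handle_trap(struct kvm_cpu_context *host_ctxt)
	{
		u64 esr = read_sysreg_el2(SYS_ESR);

		if (ESR_ELx_EC(esr) != ESR_ELx_EC_HVC64)
			hyp_panic();

		/* regs[0]: SMCCC function ID; regs[1..]: arguments */
		handle_host_hcall(host_ctxt->regs.regs[0], host_ctxt);
	}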