Merge branch 'kvm-arm64/nvhe-hyp-context' into kvmarm-master/next
Signed-off-by: Marc Zyngier <maz@kernel.org>
commit 81867b75db
arch/arm64/include/asm/kvm_asm.h

@@ -38,6 +38,30 @@
 #define __SMCCC_WORKAROUND_1_SMC_SZ 36

+#define KVM_HOST_SMCCC_ID(id)						\
+	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
+			   ARM_SMCCC_SMC_64,				\
+			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
+			   (id))
+
+#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)
+
+#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init			0
+#define __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run			1
+#define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context		2
+#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa		3
+#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid		4
+#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_local_vmid	5
+#define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff		6
+#define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs			7
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_ich_vtr_el2		8
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr		9
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr		10
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs		11
+#define __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2		12
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs		13
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs		14
+
 #ifndef __ASSEMBLY__

 #include <linux/mm.h>
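A quick usage sketch (my illustration, not part of the diff): each __KVM_HOST_SMCCC_FUNC___* constant is wrapped by KVM_HOST_SMCCC_ID() into a 64-bit vendor-hyp SMCCC fast-call ID, so a host-side call is a plain HVC with the ID in x0. Assuming the arm_smccc_1_1_hvc() helper and the names above, with example_read_vmcr() being a hypothetical wrapper:

	/* Hedged example: a host-side call using the new IDs. */
	#include <linux/arm-smccc.h>

	static u64 example_read_vmcr(void)
	{
		struct arm_smccc_res res;

		/* ID expands via ARM_SMCCC_CALL_VAL(FAST_CALL, SMC_64, OWNER_VENDOR_HYP, 9) */
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__vgic_v3_read_vmcr), &res);

		/* a0 carries SMCCC_RET_*; the payload comes back in a1 */
		return res.a1;
	}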
@@ -60,10 +84,24 @@
 	DECLARE_KVM_VHE_SYM(sym);		\
 	DECLARE_KVM_NVHE_SYM(sym)

-#define CHOOSE_VHE_SYM(sym)	sym
-#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)
+#if defined(__KVM_NVHE_HYPERVISOR__)
+
+#define CHOOSE_HYP_SYM(sym)	CHOOSE_NVHE_SYM(sym)
+#define CHOOSE_NVHE_SYM(sym)	sym
+/* The nVHE hypervisor shouldn't even try to access VHE symbols */
+extern void *__nvhe_undefined_symbol;
+#define CHOOSE_VHE_SYM(sym)	__nvhe_undefined_symbol
+
+#elif defined(__KVM_VHE_HYPERVISOR__)
+
+#define CHOOSE_HYP_SYM(sym)	CHOOSE_VHE_SYM(sym)
+#define CHOOSE_VHE_SYM(sym)	sym
+/* The VHE hypervisor shouldn't even try to access nVHE symbols */
+extern void *__vhe_undefined_symbol;
+#define CHOOSE_NVHE_SYM(sym)	__vhe_undefined_symbol
+
+#else
+
-#ifndef __KVM_NVHE_HYPERVISOR__
 /*
  * BIG FAT WARNINGS:
  *

@@ -77,10 +115,9 @@
  */
 #define CHOOSE_HYP_SYM(sym)	(is_kernel_in_hyp_mode() ? CHOOSE_VHE_SYM(sym) \
 					   : CHOOSE_NVHE_SYM(sym))
-#else
-/* The nVHE hypervisor shouldn't even try to access anything */
-extern void *__nvhe_undefined_symbol;
-#define CHOOSE_HYP_SYM(sym)	__nvhe_undefined_symbol
+#define CHOOSE_VHE_SYM(sym)	sym
+#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)

 #endif

 /* Translate a kernel address @ptr into its equivalent linear mapping */
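For orientation, a self-contained miniature of the symbol-choosing pattern (illustration only, not kernel code; taking addresses inside the macro is a simplification I added so it compiles as a standalone program):

	#include <stdio.h>

	#define kvm_nvhe_sym(sym)	__kvm_nvhe_##sym

	static int __kvm_hyp_vector;			/* stands in for the VHE copy */
	static int kvm_nvhe_sym(__kvm_hyp_vector);	/* stands in for the nVHE copy */

	static int is_kernel_in_hyp_mode(void) { return 0; }	/* pretend non-VHE host */

	#define CHOOSE_VHE_SYM(sym)	sym
	#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)
	#define CHOOSE_HYP_SYM(sym)	(is_kernel_in_hyp_mode() ? &CHOOSE_VHE_SYM(sym) \
							 : &CHOOSE_NVHE_SYM(sym))

	int main(void)
	{
		/* The kernel build picks the right per-world copy the same way. */
		printf("picked the %s copy\n",
		       CHOOSE_HYP_SYM(__kvm_hyp_vector) == &__kvm_hyp_vector ? "VHE" : "nVHE");
		return 0;
	}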
@@ -98,8 +135,10 @@ struct kvm_vcpu;
 struct kvm_s2_mmu;

 DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
+DECLARE_KVM_NVHE_SYM(__kvm_hyp_host_vector);
 DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
 #define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
+#define __kvm_hyp_host_vector	CHOOSE_NVHE_SYM(__kvm_hyp_host_vector)
 #define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)

 #ifdef CONFIG_KVM_INDIRECT_VECTORS

@@ -221,6 +260,16 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
 .endm

+.macro get_loaded_vcpu vcpu, ctxt
+	hyp_adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
+	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
+.endm
+
+.macro set_loaded_vcpu vcpu, ctxt, tmp
+	hyp_adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
+	str	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
+.endm
+
 /*
  * KVM extable for unexpected exceptions.
  * In the same format _asm_extable, but output to a different section so that

@@ -236,6 +285,45 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 	.popsection
 .endm

+#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8*x)
+#define CPU_LR_OFFSET		CPU_XREG_OFFSET(30)
+#define CPU_SP_EL0_OFFSET	(CPU_LR_OFFSET + 8)
+
+/*
+ * We treat x18 as callee-saved as the host may use it as a platform
+ * register (e.g. for shadow call stack).
+ */
+.macro save_callee_saved_regs ctxt
+	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
+	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
+	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
+	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
+	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
+	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
+	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
+.endm
+
+.macro restore_callee_saved_regs ctxt
+	// We require \ctxt is not x18-x28
+	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
+	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
+	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
+	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
+	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
+	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
+	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
+.endm
+
+.macro save_sp_el0 ctxt, tmp
+	mrs	\tmp, sp_el0
+	str	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
+.endm
+
+.macro restore_sp_el0 ctxt, tmp
+	ldr	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
+	msr	sp_el0, \tmp
+.endm
+
 #endif

 #endif /* __ARM_KVM_ASM_H__ */
arch/arm64/include/asm/kvm_host.h

@@ -11,6 +11,7 @@
 #ifndef __ARM64_KVM_HOST_H__
 #define __ARM64_KVM_HOST_H__

+#include <linux/arm-smccc.h>
 #include <linux/bitmap.h>
 #include <linux/types.h>
 #include <linux/jump_label.h>

@@ -262,8 +263,6 @@ struct kvm_host_data {
 	struct kvm_pmu_events pmu_events;
 };

-typedef struct kvm_host_data kvm_host_data_t;
-
 struct vcpu_reset_state {
 	unsigned long	pc;
 	unsigned long	r0;

@@ -480,18 +479,15 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 void kvm_arm_halt_guest(struct kvm *kvm);
 void kvm_arm_resume_guest(struct kvm *kvm);

-u64 __kvm_call_hyp(void *hypfn, ...);
-
-#define kvm_call_hyp_nvhe(f, ...)					\
-	do {								\
-		DECLARE_KVM_NVHE_SYM(f);				\
-		__kvm_call_hyp(kvm_ksym_ref_nvhe(f), ##__VA_ARGS__);	\
-	} while(0)
-
-#define kvm_call_hyp_nvhe_ret(f, ...)					\
+#define kvm_call_hyp_nvhe(f, ...)					\
 	({								\
-		DECLARE_KVM_NVHE_SYM(f);				\
-		__kvm_call_hyp(kvm_ksym_ref_nvhe(f), ##__VA_ARGS__);	\
+		struct arm_smccc_res res;				\
+									\
+		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
+				  ##__VA_ARGS__, &res);			\
+		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
+									\
+		res.a1;							\
 	})
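A caller-side sketch (mine, not from the diff; example_get_mdcr() is a hypothetical wrapper around the patch-provided names):

	static u64 example_get_mdcr(void)
	{
		/*
		 * Emits an HVC with x0 = KVM_HOST_SMCCC_FUNC(__kvm_get_mdcr_el2);
		 * WARNs unless hyp answered SMCCC_RET_SUCCESS, then the
		 * statement expression yields the payload from res.a1.
		 */
		return kvm_call_hyp_nvhe(__kvm_get_mdcr_el2);
	}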
 /*

@@ -517,7 +513,7 @@ u64 __kvm_call_hyp(void *hypfn, ...);
 		ret = f(__VA_ARGS__);					\
 		isb();							\
 	} else {							\
-		ret = kvm_call_hyp_nvhe_ret(f, ##__VA_ARGS__);		\
+		ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
 	}								\
 									\
 	ret;								\

@@ -565,7 +561,7 @@ void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

-DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);
+DECLARE_PER_CPU(struct kvm_host_data, kvm_host_data);

 static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
 {
arch/arm64/include/asm/kvm_hyp.h

@@ -12,6 +12,9 @@
 #include <asm/alternative.h>
 #include <asm/sysreg.h>

+DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
+DECLARE_PER_CPU(unsigned long, kvm_hyp_vector);
+
 #define read_sysreg_elx(r,nvh,vh)					\
 	({								\
 		u64 reg;						\

@@ -87,11 +90,11 @@ void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
 void deactivate_traps_vhe_put(void);
 #endif

-u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
+u64 __guest_enter(struct kvm_vcpu *vcpu);

-void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt);
+void __noreturn hyp_panic(void);
 #ifdef __KVM_NVHE_HYPERVISOR__
-void __noreturn __hyp_do_panic(unsigned long, ...);
+void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
 #endif

 #endif /* __ARM64_KVM_HYP_H__ */
arch/arm64/include/asm/kvm_ptrauth.h

@@ -60,7 +60,7 @@
 .endm

 /*
- * Both ptrauth_switch_to_guest and ptrauth_switch_to_host macros will
+ * Both ptrauth_switch_to_guest and ptrauth_switch_to_hyp macros will
  * check for the presence of ARM64_HAS_ADDRESS_AUTH, which is defined as
  * (ARM64_HAS_ADDRESS_AUTH_ARCH || ARM64_HAS_ADDRESS_AUTH_IMP_DEF) and
  * then proceed ahead with the save/restore of Pointer Authentication

@@ -78,7 +78,7 @@ alternative_else_nop_endif
 .L__skip_switch\@:
 .endm

-.macro ptrauth_switch_to_host g_ctxt, h_ctxt, reg1, reg2, reg3
+.macro ptrauth_switch_to_hyp g_ctxt, h_ctxt, reg1, reg2, reg3
 alternative_if_not ARM64_HAS_ADDRESS_AUTH
 	b	.L__skip_switch\@
 alternative_else_nop_endif

@@ -96,7 +96,7 @@ alternative_else_nop_endif
 #else /* !CONFIG_ARM64_PTR_AUTH */
 .macro ptrauth_switch_to_guest g_ctxt, reg1, reg2, reg3
 .endm
-.macro ptrauth_switch_to_host g_ctxt, h_ctxt, reg1, reg2, reg3
+.macro ptrauth_switch_to_hyp g_ctxt, h_ctxt, reg1, reg2, reg3
 .endm
 #endif /* CONFIG_ARM64_PTR_AUTH */
 #endif /* __ASSEMBLY__ */
arch/arm64/kernel/image-vars.h

@@ -71,6 +71,8 @@ KVM_NVHE_ALIAS(kvm_update_va_mask);
 /* Global kernel state accessed by nVHE hyp code. */
 KVM_NVHE_ALIAS(arm64_ssbd_callback_required);
 KVM_NVHE_ALIAS(kvm_host_data);
+KVM_NVHE_ALIAS(kvm_hyp_ctxt);
+KVM_NVHE_ALIAS(kvm_hyp_vector);
 KVM_NVHE_ALIAS(kvm_vgic_global_state);

 /* Kernel constant needed to compute idmap addresses. */
arch/arm64/kvm/Makefile

@@ -13,7 +13,7 @@ obj-$(CONFIG_KVM) += hyp/
 kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
	 $(KVM)/vfio.o $(KVM)/irqchip.o \
	 arm.o mmu.o mmio.o psci.o perf.o hypercalls.o pvtime.o \
-	 inject_fault.o regmap.o va_layout.o hyp.o handle_exit.o \
+	 inject_fault.o regmap.o va_layout.o handle_exit.o \
	 guest.o debug.o reset.o sys_regs.o \
	 vgic-sys-reg-v3.o fpsimd.o pmu.o \
	 aarch32.o arch_timer.o \
arch/arm64/kvm/arm.c

@@ -46,7 +46,9 @@
 __asm__(".arch_extension	virt");
 #endif

-DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
+DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
+DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
+DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);

 /* The VMID used in the VTTBR */

@@ -1265,6 +1267,7 @@ static void cpu_init_hyp_mode(void)
 	unsigned long hyp_stack_ptr;
 	unsigned long vector_ptr;
 	unsigned long tpidr_el2;
+	struct arm_smccc_res res;

 	/* Switch from the HYP stub to our own HYP init vector */
 	__hyp_set_vectors(kvm_get_idmap_vector());

@@ -1279,7 +1282,8 @@ static void cpu_init_hyp_mode(void)

 	pgd_ptr = kvm_mmu_get_httbr();
 	hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE;
-	vector_ptr = (unsigned long)kvm_get_hyp_vector();
+	hyp_stack_ptr = kern_hyp_va(hyp_stack_ptr);
+	vector_ptr = (unsigned long)kern_hyp_va(kvm_ksym_ref(__kvm_hyp_host_vector));

 	/*
 	 * Call initialization code, and switch to the full blown HYP code.

@@ -1288,7 +1292,9 @@ static void cpu_init_hyp_mode(void)
 	 * cpus_have_const_cap() wrapper.
 	 */
 	BUG_ON(!system_capabilities_finalized());
-	__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
+	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init),
+			  pgd_ptr, tpidr_el2, hyp_stack_ptr, vector_ptr, &res);
+	WARN_ON(res.a0 != SMCCC_RET_SUCCESS);

 	/*
 	 * Disabling SSBD on a non-VHE system requires us to enable SSBS

@@ -1312,6 +1318,8 @@ static void cpu_hyp_reinit(void)

 	cpu_hyp_reset();

+	__this_cpu_write(kvm_hyp_vector, (unsigned long)kvm_get_hyp_vector());
+
 	if (is_kernel_in_hyp_mode())
 		kvm_timer_init_vhe();
 	else

@@ -1541,7 +1549,9 @@ static int init_hyp_mode(void)
 	}

 	for_each_possible_cpu(cpu) {
-		kvm_host_data_t *cpu_data;
+		struct kvm_host_data *cpu_data;
+		struct kvm_cpu_context *hyp_ctxt;
+		unsigned long *vector;

 		cpu_data = per_cpu_ptr(&kvm_host_data, cpu);
 		err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP);

@@ -1550,6 +1560,22 @@ static int init_hyp_mode(void)
 			kvm_err("Cannot map host CPU state: %d\n", err);
 			goto out_err;
 		}
+
+		hyp_ctxt = per_cpu_ptr(&kvm_hyp_ctxt, cpu);
+		err = create_hyp_mappings(hyp_ctxt, hyp_ctxt + 1, PAGE_HYP);
+
+		if (err) {
+			kvm_err("Cannot map hyp context: %d\n", err);
+			goto out_err;
+		}
+
+		vector = per_cpu_ptr(&kvm_hyp_vector, cpu);
+		err = create_hyp_mappings(vector, vector + 1, PAGE_HYP);
+
+		if (err) {
+			kvm_err("Cannot map hyp guest vector address\n");
+			goto out_err;
+		}
 	}

 	err = hyp_map_aux_data();
arch/arm64/kvm/hyp.S (deleted file)

@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012,2013 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- */
-
-#include <linux/linkage.h>
-
-#include <asm/alternative.h>
-#include <asm/assembler.h>
-#include <asm/cpufeature.h>
-
-/*
- * u64 __kvm_call_hyp(void *hypfn, ...);
- *
- * This is not really a variadic function in the classic C-way and care must
- * be taken when calling this to ensure parameters are passed in registers
- * only, since the stack will change between the caller and the callee.
- *
- * Call the function with the first argument containing a pointer to the
- * function you wish to call in Hyp mode, and subsequent arguments will be
- * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
- * function pointer can be passed). The function being called must be mapped
- * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
- * passed in x0.
- *
- * A function pointer with a value less than 0xfff has a special meaning,
- * and is used to implement hyp stubs in the same way as in
- * arch/arm64/kernel/hyp_stub.S.
- */
-SYM_FUNC_START(__kvm_call_hyp)
-	hvc	#0
-	ret
-SYM_FUNC_END(__kvm_call_hyp)
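Side-by-side contrast (my note, not in the diff): the deleted trampoline passed a hyp function pointer in x0 and had hyp shuffle the real arguments down from x1-x3, whereas the replacement keeps arguments in place behind a stable SMCCC function ID:

	/* Before: x0 = hyp VA of the function; arguments shifted by do_el2_call. */
	__kvm_call_hyp(kvm_ksym_ref_nvhe(__kvm_flush_vm_context));

	/* After: x0 = SMCCC function ID; arguments stay in x1..; status in a0. */
	kvm_call_hyp_nvhe(__kvm_flush_vm_context);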
arch/arm64/kvm/hyp/entry.S

@@ -7,7 +7,6 @@
 #include <linux/linkage.h>

 #include <asm/alternative.h>
-#include <asm/asm-offsets.h>
 #include <asm/assembler.h>
 #include <asm/fpsimdmacros.h>
 #include <asm/kvm.h>

@@ -16,66 +15,28 @@
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_ptrauth.h>

-#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8*x)
-#define CPU_SP_EL0_OFFSET	(CPU_XREG_OFFSET(30) + 8)
-
 	.text

-/*
- * We treat x18 as callee-saved as the host may use it as a platform
- * register (e.g. for shadow call stack).
- */
-.macro save_callee_saved_regs ctxt
-	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
-	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
-	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
-	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
-	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
-	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
-	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
-.endm
-
-.macro restore_callee_saved_regs ctxt
-	// We require \ctxt is not x18-x28
-	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
-	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
-	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
-	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
-	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
-	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
-	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
-.endm
-
-.macro save_sp_el0 ctxt, tmp
-	mrs	\tmp, sp_el0
-	str	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
-.endm
-
-.macro restore_sp_el0 ctxt, tmp
-	ldr	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
-	msr	sp_el0, \tmp
-.endm
-
 /*
- * u64 __guest_enter(struct kvm_vcpu *vcpu,
- *		     struct kvm_cpu_context *host_ctxt);
+ * u64 __guest_enter(struct kvm_vcpu *vcpu);
  */
 SYM_FUNC_START(__guest_enter)
 	// x0: vcpu
-	// x1: host context
-	// x2-x17: clobbered by macros
+	// x1-x17: clobbered by macros
 	// x29: guest context

-	// Store the host regs
+	hyp_adr_this_cpu x1, kvm_hyp_ctxt, x2
+
+	// Store the hyp regs
 	save_callee_saved_regs x1

-	// Save the host's sp_el0
+	// Save hyp's sp_el0
 	save_sp_el0 x1, x2

-	// Now the host state is stored if we have a pending RAS SError it must
-	// affect the host. If any asynchronous exception is pending we defer
-	// the guest entry. The DSB isn't necessary before v8.2 as any SError
-	// would be fatal.
+	// Now the hyp state is stored if we have a pending RAS SError it must
+	// affect the host or hyp. If any asynchronous exception is pending we
+	// defer the guest entry. The DSB isn't necessary before v8.2 as any
+	// SError would be fatal.
 alternative_if ARM64_HAS_RAS_EXTN
 	dsb	nshst
 	isb

@@ -86,6 +47,8 @@ alternative_else_nop_endif
 	ret

 1:
+	set_loaded_vcpu x0, x1, x2
+
 	add	x29, x0, #VCPU_CONTEXT

 	// Macro ptrauth_switch_to_guest format:

@@ -116,6 +79,26 @@ alternative_else_nop_endif
 	eret
 	sb

+SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
+	// x2-x29,lr: vcpu regs
+	// vcpu x0-x1 on the stack
+
+	// If the hyp context is loaded, go straight to hyp_panic
+	get_loaded_vcpu x0, x1
+	cbz	x0, hyp_panic
+
+	// The hyp context is saved so make sure it is restored to allow
+	// hyp_panic to run at hyp and, subsequently, panic to run in the host.
+	// This makes use of __guest_exit to avoid duplication but sets the
+	// return address to tail call into hyp_panic. As a side effect, the
+	// current state is saved to the guest context but it will only be
+	// accurate if the guest had been completely restored.
+	hyp_adr_this_cpu x0, kvm_hyp_ctxt, x1
+	adr	x1, hyp_panic
+	str	x1, [x0, #CPU_XREG_OFFSET(30)]
+
+	get_vcpu_ptr	x1, x0
+
 SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
 	// x0: return code
 	// x1: vcpu

@@ -148,21 +131,23 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
 	// Store the guest's sp_el0
 	save_sp_el0	x1, x2

-	get_host_ctxt	x2, x3
+	hyp_adr_this_cpu x2, kvm_hyp_ctxt, x3

-	// Macro ptrauth_switch_to_guest format:
-	// 	ptrauth_switch_to_host(guest cxt, host cxt, tmp1, tmp2, tmp3)
+	// Macro ptrauth_switch_to_hyp format:
+	// 	ptrauth_switch_to_hyp(guest cxt, host cxt, tmp1, tmp2, tmp3)
 	// The below macro to save/restore keys is not implemented in C code
 	// as it may cause Pointer Authentication key signing mismatch errors
 	// when this feature is enabled for kernel code.
-	ptrauth_switch_to_host x1, x2, x3, x4, x5
+	ptrauth_switch_to_hyp x1, x2, x3, x4, x5

-	// Restore the hosts's sp_el0
+	// Restore hyp's sp_el0
 	restore_sp_el0 x2, x3

-	// Now restore the host regs
+	// Now restore the hyp regs
 	restore_callee_saved_regs x2

+	set_loaded_vcpu xzr, x1, x2
+
 alternative_if ARM64_HAS_RAS_EXTN
 	// If we have the RAS extensions we can consume a pending error
 	// without an unmask-SError and isb. The ESB-instruction consumed any
arch/arm64/kvm/hyp/hyp-entry.S

@@ -12,7 +12,6 @@
 #include <asm/cpufeature.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
-#include <asm/kvm_mmu.h>
 #include <asm/mmu.h>

 .macro save_caller_saved_regs_vect

@@ -41,20 +40,6 @@

 	.text

-.macro do_el2_call
-	/*
-	 * Shuffle the parameters before calling the function
-	 * pointed to in x0. Assumes parameters in x[1,2,3].
-	 */
-	str	lr, [sp, #-16]!
-	mov	lr, x0
-	mov	x0, x1
-	mov	x1, x2
-	mov	x2, x3
-	blr	lr
-	ldr	lr, [sp], #16
-.endm
-
 el1_sync:				// Guest trapped into EL2

 	mrs	x0, esr_el2

@@ -63,44 +48,6 @@ el1_sync:				// Guest trapped into EL2
 	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
 	b.ne	el1_trap

-#ifdef __KVM_NVHE_HYPERVISOR__
-	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
-	cbnz	x1, el1_hvc_guest	// called HVC
-
-	/* Here, we're pretty sure the host called HVC. */
-	ldp	x0, x1, [sp], #16
-
-	/* Check for a stub HVC call */
-	cmp	x0, #HVC_STUB_HCALL_NR
-	b.hs	1f
-
-	/*
-	 * Compute the idmap address of __kvm_handle_stub_hvc and
-	 * jump there. Since we use kimage_voffset, do not use the
-	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
-	 * (by loading it from the constant pool).
-	 *
-	 * Preserve x0-x4, which may contain stub parameters.
-	 */
-	ldr	x5, =__kvm_handle_stub_hvc
-	ldr_l	x6, kimage_voffset
-
-	/* x5 = __pa(x5) */
-	sub	x5, x5, x6
-	br	x5
-
-1:
-	/*
-	 * Perform the EL2 call
-	 */
-	kern_hyp_va	x0
-	do_el2_call
-
-	eret
-	sb
-#endif /* __KVM_NVHE_HYPERVISOR__ */
-
 el1_hvc_guest:
 	/*
 	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
 	 * The workaround has already been applied on the host,

@@ -198,24 +145,7 @@ el2_error:
 	eret
 	sb

-#ifdef __KVM_NVHE_HYPERVISOR__
-SYM_FUNC_START(__hyp_do_panic)
-	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
-		      PSR_MODE_EL1h)
-	msr	spsr_el2, lr
-	ldr	lr, =panic
-	msr	elr_el2, lr
-	eret
-	sb
-SYM_FUNC_END(__hyp_do_panic)
-#endif
-
-SYM_CODE_START(__hyp_panic)
-	get_host_ctxt x0, x1
-	b	hyp_panic
-SYM_CODE_END(__hyp_panic)
-
-.macro invalid_vector	label, target = __hyp_panic
+.macro invalid_vector	label, target = __guest_exit_panic
 	.align	2
 SYM_CODE_START(\label)
 	b \target

@@ -227,7 +157,6 @@ SYM_CODE_END(\label)
 	invalid_vector	el2t_irq_invalid
 	invalid_vector	el2t_fiq_invalid
 	invalid_vector	el2t_error_invalid
-	invalid_vector	el2h_sync_invalid
 	invalid_vector	el2h_irq_invalid
 	invalid_vector	el2h_fiq_invalid
 	invalid_vector	el1_fiq_invalid

@@ -257,10 +186,9 @@ check_preamble_length 661b, 662b
 .macro invalid_vect target
 	.align 7
 661:
-	b	\target
 	nop
+	stp	x0, x1, [sp, #-16]!
 662:
-	ldp	x0, x1, [sp], #16
 	b	\target

 check_preamble_length 661b, 662b
arch/arm64/kvm/hyp/include/hyp/switch.h

@@ -126,11 +126,6 @@ static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
 	}
 }

-static inline void __activate_vm(struct kvm_s2_mmu *mmu)
-{
-	__load_guest_stage2(mmu);
-}
-
 static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
 {
 	u64 par, tmp;

@@ -377,6 +372,8 @@ static inline bool esr_is_ptrauth_trap(u32 esr)
 	ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val;			\
 } while(0)

+DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
+
 static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *ctxt;

@@ -386,7 +383,7 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
 	    !esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
 		return false;

-	ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+	ctxt = __hyp_this_cpu_ptr(kvm_hyp_ctxt);
 	__ptrauth_save_key(ctxt, APIA);
 	__ptrauth_save_key(ctxt, APIB);
 	__ptrauth_save_key(ctxt, APDA);

@@ -514,14 +511,13 @@ static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)

 static inline void __kvm_unexpected_el2_exception(void)
 {
+	extern char __guest_exit_panic[];
 	unsigned long addr, fixup;
-	struct kvm_cpu_context *host_ctxt;
 	struct exception_table_entry *entry, *end;
 	unsigned long elr_el2 = read_sysreg(elr_el2);

 	entry = hyp_symbol_addr(__start___kvm_ex_table);
 	end = hyp_symbol_addr(__stop___kvm_ex_table);
-	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;

 	while (entry < end) {
 		addr = (unsigned long)&entry->insn + entry->insn;

@@ -536,7 +532,8 @@ static inline void __kvm_unexpected_el2_exception(void)
 		return;
 	}

-	hyp_panic(host_ctxt);
+	/* Trigger a panic after restoring the hyp context. */
+	write_sysreg(__guest_exit_panic, elr_el2);
 }

 #endif /* __ARM64_KVM_HYP_SWITCH_H__ */
arch/arm64/kvm/hyp/nvhe/Makefile

@@ -6,7 +6,7 @@
 asflags-y := -D__KVM_NVHE_HYPERVISOR__
 ccflags-y := -D__KVM_NVHE_HYPERVISOR__

-obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o
+obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o hyp-main.o
 obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
	 ../fpsimd.o ../hyp-entry.o
arch/arm64/kvm/hyp/nvhe/host.S (new file, 187 lines)

@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 - Google Inc
+ * Author: Andrew Scull <ascull@google.com>
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/assembler.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmu.h>
+
+	.text
+
+SYM_FUNC_START(__host_exit)
+	stp	x0, x1, [sp, #-16]!
+
+	get_host_ctxt	x0, x1
+
+	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
+
+	/* Store the host regs x2 and x3 */
+	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(2)]
+
+	/* Retrieve the host regs x0-x1 from the stack */
+	ldp	x2, x3, [sp], #16	// x0, x1
+
+	/* Store the host regs x0-x1 and x4-x17 */
+	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(0)]
+	stp	x4, x5,   [x0, #CPU_XREG_OFFSET(4)]
+	stp	x6, x7,   [x0, #CPU_XREG_OFFSET(6)]
+	stp	x8, x9,   [x0, #CPU_XREG_OFFSET(8)]
+	stp	x10, x11, [x0, #CPU_XREG_OFFSET(10)]
+	stp	x12, x13, [x0, #CPU_XREG_OFFSET(12)]
+	stp	x14, x15, [x0, #CPU_XREG_OFFSET(14)]
+	stp	x16, x17, [x0, #CPU_XREG_OFFSET(16)]
+
+	/* Store the host regs x18-x29, lr */
+	save_callee_saved_regs x0
+
+	/* Save the host context pointer in x29 across the function call */
+	mov	x29, x0
+	bl	handle_trap
+
+	/* Restore host regs x0-x17 */
+	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
+	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
+	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
+	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]
+
+	/* x0-7 are used for panic arguments */
+__host_enter_for_panic:
+	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
+	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
+	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
+	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
+	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]
+
+	/* Restore host regs x18-x29, lr */
+	restore_callee_saved_regs x29
+
+	/* Do not touch any register after this! */
+__host_enter_without_restoring:
+	eret
+	sb
+SYM_FUNC_END(__host_exit)
+
+/*
+ * void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
+ */
+SYM_FUNC_START(__hyp_do_panic)
+	/* Load the format arguments into x1-7 */
+	mov	x6, x3
+	get_vcpu_ptr x7, x3
+
+	mrs	x3, esr_el2
+	mrs	x4, far_el2
+	mrs	x5, hpfar_el2
+
+	/* Prepare and exit to the host's panic function. */
+	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
+		      PSR_MODE_EL1h)
+	msr	spsr_el2, lr
+	ldr	lr, =panic
+	msr	elr_el2, lr
+
+	/*
+	 * Set the panic format string and enter the host, conditionally
+	 * restoring the host context.
+	 */
+	cmp	x0, xzr
+	ldr	x0, =__hyp_panic_string
+	b.eq	__host_enter_without_restoring
+	b	__host_enter_for_panic
+SYM_FUNC_END(__hyp_do_panic)
+
+.macro host_el1_sync_vect
+	.align 7
+.L__vect_start\@:
+	stp	x0, x1, [sp, #-16]!
+	mrs	x0, esr_el2
+	lsr	x0, x0, #ESR_ELx_EC_SHIFT
+	cmp	x0, #ESR_ELx_EC_HVC64
+	ldp	x0, x1, [sp], #16
+	b.ne	__host_exit
+
+	/* Check for a stub HVC call */
+	cmp	x0, #HVC_STUB_HCALL_NR
+	b.hs	__host_exit
+
+	/*
+	 * Compute the idmap address of __kvm_handle_stub_hvc and
+	 * jump there. Since we use kimage_voffset, do not use the
+	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
+	 * (by loading it from the constant pool).
+	 *
+	 * Preserve x0-x4, which may contain stub parameters.
+	 */
+	ldr	x5, =__kvm_handle_stub_hvc
+	ldr_l	x6, kimage_voffset
+
+	/* x5 = __pa(x5) */
+	sub	x5, x5, x6
+	br	x5
+.L__vect_end\@:
+.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
+	.error "host_el1_sync_vect larger than vector entry"
+.endif
+.endm
+
+.macro invalid_host_el2_vect
+	.align 7
+	/* If a guest is loaded, panic out of it. */
+	stp	x0, x1, [sp, #-16]!
+	get_loaded_vcpu x0, x1
+	cbnz	x0, __guest_exit_panic
+	add	sp, sp, #16
+
+	/*
+	 * The panic may not be clean if the exception is taken before the host
+	 * context has been saved by __host_exit or after the hyp context has
+	 * been partially clobbered by __host_enter.
+	 */
+	b	hyp_panic
+.endm
+
+.macro invalid_host_el1_vect
+	.align 7
+	mov	x0, xzr		/* restore_host = false */
+	mrs	x1, spsr_el2
+	mrs	x2, elr_el2
+	mrs	x3, par_el1
+	b	__hyp_do_panic
+.endm
+
+/*
+ * The host vector does not use an ESB instruction in order to avoid consuming
+ * SErrors that should only be consumed by the host. Guest entry is deferred by
+ * __guest_enter if there are any pending asynchronous exceptions so hyp will
+ * always return to the host without having consumed host SErrors.
+ *
+ * CONFIG_KVM_INDIRECT_VECTORS is not applied to the host vectors because the
+ * host knows about the EL2 vectors already, and there is no point in hiding
+ * them.
+ */
+	.align 11
+SYM_CODE_START(__kvm_hyp_host_vector)
+	invalid_host_el2_vect			// Synchronous EL2t
+	invalid_host_el2_vect			// IRQ EL2t
+	invalid_host_el2_vect			// FIQ EL2t
+	invalid_host_el2_vect			// Error EL2t
+
+	invalid_host_el2_vect			// Synchronous EL2h
+	invalid_host_el2_vect			// IRQ EL2h
+	invalid_host_el2_vect			// FIQ EL2h
+	invalid_host_el2_vect			// Error EL2h
+
+	host_el1_sync_vect			// Synchronous 64-bit EL1
+	invalid_host_el1_vect			// IRQ 64-bit EL1
+	invalid_host_el1_vect			// FIQ 64-bit EL1
+	invalid_host_el1_vect			// Error 64-bit EL1
+
+	invalid_host_el1_vect			// Synchronous 32-bit EL1
+	invalid_host_el1_vect			// IRQ 32-bit EL1
+	invalid_host_el1_vect			// FIQ 32-bit EL1
+	invalid_host_el1_vect			// Error 32-bit EL1
+SYM_CODE_END(__kvm_hyp_host_vector)
arch/arm64/kvm/hyp/nvhe/hyp-init.S

@@ -4,11 +4,13 @@
  * Author: Marc Zyngier <marc.zyngier@arm.com>
  */

+#include <linux/arm-smccc.h>
 #include <linux/linkage.h>

 #include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmu.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/sysreg.h>

@@ -44,27 +46,37 @@ __invalid:
 	b	.

 	/*
-	 * x0: HYP pgd
-	 * x1: HYP stack
-	 * x2: HYP vectors
-	 * x3: per-CPU offset
+	 * x0: SMCCC function ID
+	 * x1: HYP pgd
+	 * x2: per-CPU offset
+	 * x3: HYP stack
+	 * x4: HYP vectors
	 */
 __do_hyp_init:
 	/* Check for a stub HVC call */
 	cmp	x0, #HVC_STUB_HCALL_NR
 	b.lo	__kvm_handle_stub_hvc

-	phys_to_ttbr x4, x0
-alternative_if ARM64_HAS_CNP
-	orr	x4, x4, #TTBR_CNP_BIT
-alternative_else_nop_endif
-	msr	ttbr0_el2, x4
+	/* Set tpidr_el2 for use by HYP to free a register */
+	msr	tpidr_el2, x2

-	mrs	x4, tcr_el1
-	mov_q	x5, TCR_EL2_MASK
-	and	x4, x4, x5
-	mov	x5, #TCR_EL2_RES1
-	orr	x4, x4, x5
+	mov	x2, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
+	cmp	x0, x2
+	b.eq	1f
+	mov	x0, #SMCCC_RET_NOT_SUPPORTED
+	eret
+
+1:	phys_to_ttbr x0, x1
+alternative_if ARM64_HAS_CNP
+	orr	x0, x0, #TTBR_CNP_BIT
+alternative_else_nop_endif
+	msr	ttbr0_el2, x0
+
+	mrs	x0, tcr_el1
+	mov_q	x1, TCR_EL2_MASK
+	and	x0, x0, x1
+	mov	x1, #TCR_EL2_RES1
+	orr	x0, x0, x1

 	/*
 	 * The ID map may be configured to use an extended virtual address

@@ -80,18 +92,18 @@ alternative_else_nop_endif
 	 *
 	 * So use the same T0SZ value we use for the ID map.
 	 */
-	ldr_l	x5, idmap_t0sz
-	bfi	x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
+	ldr_l	x1, idmap_t0sz
+	bfi	x0, x1, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH

 	/*
 	 * Set the PS bits in TCR_EL2.
 	 */
-	tcr_compute_pa_size x4, #TCR_EL2_PS_SHIFT, x5, x6
+	tcr_compute_pa_size x0, #TCR_EL2_PS_SHIFT, x1, x2

-	msr	tcr_el2, x4
+	msr	tcr_el2, x0

-	mrs	x4, mair_el1
-	msr	mair_el2, x4
+	mrs	x0, mair_el1
+	msr	mair_el2, x0
 	isb

 	/* Invalidate the stale TLBs from Bootloader */

@@ -103,25 +115,22 @@ alternative_else_nop_endif
 	 * as well as the EE bit on BE. Drop the A flag since the compiler
 	 * is allowed to generate unaligned accesses.
 	 */
-	mov_q	x4, (SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
-CPU_BE(	orr	x4, x4, #SCTLR_ELx_EE)
+	mov_q	x0, (SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
+CPU_BE(	orr	x0, x0, #SCTLR_ELx_EE)
 alternative_if ARM64_HAS_ADDRESS_AUTH
-	mov_q	x5, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
+	mov_q	x1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
		     SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
-	orr	x4, x4, x5
+	orr	x0, x0, x1
 alternative_else_nop_endif
-	msr	sctlr_el2, x4
+	msr	sctlr_el2, x0
 	isb

 	/* Set the stack and new vectors */
-	kern_hyp_va x1
-	mov	sp, x1
-	msr	vbar_el2, x2
-
-	/* Set tpidr_el2 for use by HYP */
-	msr	tpidr_el2, x3
+	mov	sp, x3
+	msr	vbar_el2, x4

 	/* Hello, World! */
+	mov	x0, #SMCCC_RET_SUCCESS
 	eret
 SYM_CODE_END(__kvm_hyp_init)
arch/arm64/kvm/hyp/nvhe/hyp-main.c (new file, 117 lines)

@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 - Google Inc
+ * Author: Andrew Scull <ascull@google.com>
+ */
+
+#include <hyp/switch.h>
+
+#include <asm/kvm_asm.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
+
+#include <kvm/arm_hypercalls.h>
+
+static void handle_host_hcall(unsigned long func_id,
+			      struct kvm_cpu_context *host_ctxt)
+{
+	unsigned long ret = 0;
+
+	switch (func_id) {
+	case KVM_HOST_SMCCC_FUNC(__kvm_vcpu_run): {
+		unsigned long r1 = host_ctxt->regs.regs[1];
+		struct kvm_vcpu *vcpu = (struct kvm_vcpu *)r1;
+
+		ret = __kvm_vcpu_run(kern_hyp_va(vcpu));
+		break;
+	}
+	case KVM_HOST_SMCCC_FUNC(__kvm_flush_vm_context):
+		__kvm_flush_vm_context();
+		break;
+	case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_vmid_ipa): {
+		unsigned long r1 = host_ctxt->regs.regs[1];
+		struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)r1;
+		phys_addr_t ipa = host_ctxt->regs.regs[2];
+		int level = host_ctxt->regs.regs[3];
+
+		__kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level);
+		break;
+	}
+	case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_vmid): {
+		unsigned long r1 = host_ctxt->regs.regs[1];
+		struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)r1;
+
+		__kvm_tlb_flush_vmid(kern_hyp_va(mmu));
+		break;
+	}
+	case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_local_vmid): {
+		unsigned long r1 = host_ctxt->regs.regs[1];
+		struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)r1;
+
+		__kvm_tlb_flush_local_vmid(kern_hyp_va(mmu));
+		break;
+	}
+	case KVM_HOST_SMCCC_FUNC(__kvm_timer_set_cntvoff): {
+		u64 cntvoff = host_ctxt->regs.regs[1];
+
+		__kvm_timer_set_cntvoff(cntvoff);
+		break;
+	}
+	case KVM_HOST_SMCCC_FUNC(__kvm_enable_ssbs):
+		__kvm_enable_ssbs();
+		break;
+	case KVM_HOST_SMCCC_FUNC(__vgic_v3_get_ich_vtr_el2):
+		ret = __vgic_v3_get_ich_vtr_el2();
+		break;
+	case KVM_HOST_SMCCC_FUNC(__vgic_v3_read_vmcr):
+		ret = __vgic_v3_read_vmcr();
+		break;
+	case KVM_HOST_SMCCC_FUNC(__vgic_v3_write_vmcr): {
+		u32 vmcr = host_ctxt->regs.regs[1];
+
+		__vgic_v3_write_vmcr(vmcr);
+		break;
+	}
+	case KVM_HOST_SMCCC_FUNC(__vgic_v3_init_lrs):
+		__vgic_v3_init_lrs();
+		break;
+	case KVM_HOST_SMCCC_FUNC(__kvm_get_mdcr_el2):
+		ret = __kvm_get_mdcr_el2();
+		break;
+	case KVM_HOST_SMCCC_FUNC(__vgic_v3_save_aprs): {
+		unsigned long r1 = host_ctxt->regs.regs[1];
+		struct vgic_v3_cpu_if *cpu_if = (struct vgic_v3_cpu_if *)r1;
+
+		__vgic_v3_save_aprs(kern_hyp_va(cpu_if));
+		break;
+	}
+	case KVM_HOST_SMCCC_FUNC(__vgic_v3_restore_aprs): {
+		unsigned long r1 = host_ctxt->regs.regs[1];
+		struct vgic_v3_cpu_if *cpu_if = (struct vgic_v3_cpu_if *)r1;
+
+		__vgic_v3_restore_aprs(kern_hyp_va(cpu_if));
+		break;
+	}
+	default:
+		/* Invalid host HVC. */
+		host_ctxt->regs.regs[0] = SMCCC_RET_NOT_SUPPORTED;
+		return;
+	}
+
+	host_ctxt->regs.regs[0] = SMCCC_RET_SUCCESS;
+	host_ctxt->regs.regs[1] = ret;
+}
+
+void handle_trap(struct kvm_cpu_context *host_ctxt)
+{
+	u64 esr = read_sysreg_el2(SYS_ESR);
+	unsigned long func_id;
+
+	if (ESR_ELx_EC(esr) != ESR_ELx_EC_HVC64)
+		hyp_panic();
+
+	func_id = host_ctxt->regs.regs[0];
+	handle_host_hcall(func_id, host_ctxt);
+}
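To trace one round trip (my summary, not patch text): the host issues an HVC, __host_exit in host.S saves the host GPRs into the per-CPU context, and handle_trap() above dispatches on regs[0]. A host-side sketch using the kvm_call_hyp_ret() macro that already exists in kvm_host.h (example_run_vcpu() is a hypothetical wrapper):

	static int example_run_vcpu(struct kvm_vcpu *vcpu)
	{
		/*
		 * HVC with x0 = KVM_HOST_SMCCC_FUNC(__kvm_vcpu_run), x1 = vcpu.
		 * handle_host_hcall() converts x1 with kern_hyp_va(), runs the
		 * guest, then returns SMCCC_RET_SUCCESS in x0 and the exit
		 * code in x1, which the caller sees as res.a1.
		 */
		return kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
	}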
arch/arm64/kvm/hyp/nvhe/switch.c

@@ -42,6 +42,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 	}

 	write_sysreg(val, cptr_el2);
+	write_sysreg(__hyp_this_cpu_read(kvm_hyp_vector), vbar_el2);

 	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;

@@ -60,6 +61,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)

 static void __deactivate_traps(struct kvm_vcpu *vcpu)
 {
+	extern char __kvm_hyp_host_vector[];
 	u64 mdcr_el2;

 	___deactivate_traps(vcpu);

@@ -91,9 +93,10 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
 	write_sysreg(mdcr_el2, mdcr_el2);
 	write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
 	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
+	write_sysreg(__kvm_hyp_host_vector, vbar_el2);
 }

-static void __deactivate_vm(struct kvm_vcpu *vcpu)
+static void __load_host_stage2(void)
 {
 	write_sysreg(0, vttbr_el2);
 }

@@ -173,8 +176,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 		pmr_sync();
 	}

-	vcpu = kern_hyp_va(vcpu);
-
 	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 	host_ctxt->__hyp_running_vcpu = vcpu;
 	guest_ctxt = &vcpu->arch.ctxt;

@@ -194,7 +195,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	__sysreg32_restore_state(vcpu);
 	__sysreg_restore_state_nvhe(guest_ctxt);

-	__activate_vm(kern_hyp_va(vcpu->arch.hw_mmu));
+	__load_guest_stage2(kern_hyp_va(vcpu->arch.hw_mmu));
 	__activate_traps(vcpu);

 	__hyp_vgic_restore_state(vcpu);

@@ -206,7 +207,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)

 	do {
 		/* Jump in the fire! */
-		exit_code = __guest_enter(vcpu, host_ctxt);
+		exit_code = __guest_enter(vcpu);

 		/* And we're baaack! */
 	} while (fixup_guest_exit(vcpu, &exit_code));

@@ -219,7 +220,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	__hyp_vgic_save_state(vcpu);

 	__deactivate_traps(vcpu);
-	__deactivate_vm(vcpu);
+	__load_host_stage2();

 	__sysreg_restore_state_nvhe(host_ctxt);

@@ -239,35 +240,31 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	if (system_uses_irq_prio_masking())
 		gic_write_pmr(GIC_PRIO_IRQOFF);

+	host_ctxt->__hyp_running_vcpu = NULL;
+
 	return exit_code;
 }

-void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
+void __noreturn hyp_panic(void)
 {
 	u64 spsr = read_sysreg_el2(SYS_SPSR);
 	u64 elr = read_sysreg_el2(SYS_ELR);
 	u64 par = read_sysreg(par_el1);
-	struct kvm_vcpu *vcpu = host_ctxt->__hyp_running_vcpu;
-	unsigned long str_va;
+	bool restore_host = true;
+	struct kvm_cpu_context *host_ctxt;
+	struct kvm_vcpu *vcpu;

-	if (read_sysreg(vttbr_el2)) {
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+	vcpu = host_ctxt->__hyp_running_vcpu;
+
+	if (vcpu) {
 		__timer_disable_traps(vcpu);
 		__deactivate_traps(vcpu);
-		__deactivate_vm(vcpu);
+		__load_host_stage2();
 		__sysreg_restore_state_nvhe(host_ctxt);
 	}

-	/*
-	 * Force the panic string to be loaded from the literal pool,
-	 * making sure it is a kernel address and not a PC-relative
-	 * reference.
-	 */
-	asm volatile("ldr %0, =%1" : "=r" (str_va) : "S" (__hyp_panic_string));
-
-	__hyp_do_panic(str_va,
-		       spsr, elr,
-		       read_sysreg(esr_el2), read_sysreg_el2(SYS_FAR),
-		       read_sysreg(hpfar_el2), par, vcpu);
+	__hyp_do_panic(restore_host, spsr, elr, par);
 	unreachable();
 }
arch/arm64/kvm/hyp/nvhe/tlb.c

@@ -54,7 +54,6 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
 	dsb(ishst);

 	/* Switch to requested VMID */
-	mmu = kern_hyp_va(mmu);
 	__tlb_switch_to_guest(mmu, &cxt);

 	/*

@@ -108,7 +107,6 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
 	dsb(ishst);

 	/* Switch to requested VMID */
-	mmu = kern_hyp_va(mmu);
 	__tlb_switch_to_guest(mmu, &cxt);

 	__tlbi(vmalls12e1is);
arch/arm64/kvm/hyp/vhe/switch.c

@@ -59,7 +59,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)

 	write_sysreg(val, cpacr_el1);

-	write_sysreg(kvm_get_hyp_vector(), vbar_el1);
+	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el1);
 }
 NOKPROBE_SYMBOL(__activate_traps);

@@ -120,12 +120,12 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 	 * HCR_EL2.TGE.
 	 *
 	 * We have already configured the guest's stage 1 translation in
-	 * kvm_vcpu_load_sysregs_vhe above. We must now call __activate_vm
-	 * before __activate_traps, because __activate_vm configures
-	 * stage 2 translation, and __activate_traps clear HCR_EL2.TGE
-	 * (among other things).
+	 * kvm_vcpu_load_sysregs_vhe above. We must now call
+	 * __load_guest_stage2 before __activate_traps, because
+	 * __load_guest_stage2 configures stage 2 translation, and
+	 * __activate_traps clear HCR_EL2.TGE (among other things).
 	 */
-	__activate_vm(vcpu->arch.hw_mmu);
+	__load_guest_stage2(vcpu->arch.hw_mmu);
 	__activate_traps(vcpu);

 	sysreg_restore_guest_state_vhe(guest_ctxt);

@@ -135,7 +135,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)

 	do {
 		/* Jump in the fire! */
-		exit_code = __guest_enter(vcpu, host_ctxt);
+		exit_code = __guest_enter(vcpu);

 		/* And we're baaack! */
 	} while (fixup_guest_exit(vcpu, &exit_code));

@@ -192,10 +192,12 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	return ret;
 }

-static void __hyp_call_panic(u64 spsr, u64 elr, u64 par,
-			     struct kvm_cpu_context *host_ctxt)
+static void __hyp_call_panic(u64 spsr, u64 elr, u64 par)
 {
+	struct kvm_cpu_context *host_ctxt;
 	struct kvm_vcpu *vcpu;

+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 	vcpu = host_ctxt->__hyp_running_vcpu;

 	__deactivate_traps(vcpu);

@@ -208,13 +210,13 @@ static void __hyp_call_panic(u64 spsr, u64 elr, u64 par,
 }
 NOKPROBE_SYMBOL(__hyp_call_panic);

-void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
+void __noreturn hyp_panic(void)
 {
 	u64 spsr = read_sysreg_el2(SYS_SPSR);
 	u64 elr = read_sysreg_el2(SYS_ELR);
 	u64 par = read_sysreg(par_el1);

-	__hyp_call_panic(spsr, elr, par, host_ctxt);
+	__hyp_call_panic(spsr, elr, par);
 	unreachable();
 }
arch/arm64/kvm/vgic/vgic-v3.c

@@ -662,7 +662,7 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
 	if (likely(cpu_if->vgic_sre))
 		kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);

-	kvm_call_hyp(__vgic_v3_restore_aprs, kern_hyp_va(cpu_if));
+	kvm_call_hyp(__vgic_v3_restore_aprs, cpu_if);

 	if (has_vhe())
 		__vgic_v3_activate_traps(cpu_if);

@@ -686,7 +686,7 @@ void vgic_v3_put(struct kvm_vcpu *vcpu)

 	vgic_v3_vmcr_sync(vcpu);

-	kvm_call_hyp(__vgic_v3_save_aprs, kern_hyp_va(cpu_if));
+	kvm_call_hyp(__vgic_v3_save_aprs, cpu_if);

 	if (has_vhe())
 		__vgic_v3_deactivate_traps(cpu_if);
include/linux/arm-smccc.h

@@ -49,6 +49,7 @@
 #define ARM_SMCCC_OWNER_OEM		3
 #define ARM_SMCCC_OWNER_STANDARD	4
+#define ARM_SMCCC_OWNER_STANDARD_HYP	5
 #define ARM_SMCCC_OWNER_VENDOR_HYP	6
 #define ARM_SMCCC_OWNER_TRUSTED_APP	48
 #define ARM_SMCCC_OWNER_TRUSTED_APP_END	49
 #define ARM_SMCCC_OWNER_TRUSTED_OS	50

@@ -227,87 +228,67 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
 #define __count_args(...)						\
	___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)

-#define __constraint_write_0						\
-	"+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3)
-#define __constraint_write_1						\
-	"+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3)
-#define __constraint_write_2						\
-	"+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3)
-#define __constraint_write_3						\
-	"+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3)
-#define __constraint_write_4	__constraint_write_3
-#define __constraint_write_5	__constraint_write_4
-#define __constraint_write_6	__constraint_write_5
-#define __constraint_write_7	__constraint_write_6
-
-#define __constraint_read_0
-#define __constraint_read_1
-#define __constraint_read_2
-#define __constraint_read_3
-#define __constraint_read_4	"r" (r4)
-#define __constraint_read_5	__constraint_read_4, "r" (r5)
-#define __constraint_read_6	__constraint_read_5, "r" (r6)
-#define __constraint_read_7	__constraint_read_6, "r" (r7)
+#define __constraint_read_0	"r" (arg0)
+#define __constraint_read_1	__constraint_read_0, "r" (arg1)
+#define __constraint_read_2	__constraint_read_1, "r" (arg2)
+#define __constraint_read_3	__constraint_read_2, "r" (arg3)
+#define __constraint_read_4	__constraint_read_3, "r" (arg4)
+#define __constraint_read_5	__constraint_read_4, "r" (arg5)
+#define __constraint_read_6	__constraint_read_5, "r" (arg6)
+#define __constraint_read_7	__constraint_read_6, "r" (arg7)

 #define __declare_arg_0(a0, res)					\
	struct arm_smccc_res   *___res = res;				\
-	register unsigned long r0 asm("r0") = (u32)a0;			\
-	register unsigned long r1 asm("r1");				\
-	register unsigned long r2 asm("r2");				\
-	register unsigned long r3 asm("r3")
+	register unsigned long arg0 asm("r0") = (u32)a0

 #define __declare_arg_1(a0, a1, res)					\
	typeof(a1) __a1 = a1;						\
	struct arm_smccc_res   *___res = res;				\
-	register unsigned long r0 asm("r0") = (u32)a0;			\
-	register unsigned long r1 asm("r1") = __a1;			\
-	register unsigned long r2 asm("r2");				\
-	register unsigned long r3 asm("r3")
+	register unsigned long arg0 asm("r0") = (u32)a0;		\
+	register typeof(a1) arg1 asm("r1") = __a1

 #define __declare_arg_2(a0, a1, a2, res)				\
	typeof(a1) __a1 = a1;						\
	typeof(a2) __a2 = a2;						\
	struct arm_smccc_res   *___res = res;				\
-	register unsigned long r0 asm("r0") = (u32)a0;			\
-	register unsigned long r1 asm("r1") = __a1;			\
-	register unsigned long r2 asm("r2") = __a2;			\
-	register unsigned long r3 asm("r3")
+	register unsigned long arg0 asm("r0") = (u32)a0;		\
+	register typeof(a1) arg1 asm("r1") = __a1;			\
+	register typeof(a2) arg2 asm("r2") = __a2

 #define __declare_arg_3(a0, a1, a2, a3, res)				\
	typeof(a1) __a1 = a1;						\
	typeof(a2) __a2 = a2;						\
	typeof(a3) __a3 = a3;						\
	struct arm_smccc_res   *___res = res;				\
-	register unsigned long r0 asm("r0") = (u32)a0;			\
-	register unsigned long r1 asm("r1") = __a1;			\
-	register unsigned long r2 asm("r2") = __a2;			\
-	register unsigned long r3 asm("r3") = __a3
+	register unsigned long arg0 asm("r0") = (u32)a0;		\
+	register typeof(a1) arg1 asm("r1") = __a1;			\
+	register typeof(a2) arg2 asm("r2") = __a2;			\
+	register typeof(a3) arg3 asm("r3") = __a3

 #define __declare_arg_4(a0, a1, a2, a3, a4, res)			\
	typeof(a4) __a4 = a4;						\
	__declare_arg_3(a0, a1, a2, a3, res);				\
-	register unsigned long r4 asm("r4") = __a4
+	register typeof(a4) arg4 asm("r4") = __a4

 #define __declare_arg_5(a0, a1, a2, a3, a4, a5, res)			\
	typeof(a5) __a5 = a5;						\
	__declare_arg_4(a0, a1, a2, a3, a4, res);			\
-	register unsigned long r5 asm("r5") = __a5
+	register typeof(a5) arg5 asm("r5") = __a5

 #define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res)		\
	typeof(a6) __a6 = a6;						\
	__declare_arg_5(a0, a1, a2, a3, a4, a5, res);			\
-	register unsigned long r6 asm("r6") = __a6
+	register typeof(a6) arg6 asm("r6") = __a6

 #define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res)		\
	typeof(a7) __a7 = a7;						\
	__declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res);		\
-	register unsigned long r7 asm("r7") = __a7
+	register typeof(a7) arg7 asm("r7") = __a7

 #define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
 #define __declare_args(count, ...)  ___declare_args(count, __VA_ARGS__)

 #define ___constraints(count)						\
-	: __constraint_write_ ## count					\
	: __constraint_read_ ## count					\
	: "memory"
 #define __constraints(count)	___constraints(count)

@@ -319,8 +300,13 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
  */
 #define __arm_smccc_1_1(inst, ...)					\
	do {								\
+		register unsigned long r0 asm("r0");			\
+		register unsigned long r1 asm("r1");			\
+		register unsigned long r2 asm("r2");			\
+		register unsigned long r3 asm("r3");			\
		__declare_args(__count_args(__VA_ARGS__), __VA_ARGS__);	\
-		asm volatile(inst "\n"					\
+		asm volatile(inst "\n" :				\
+			     "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)	\
			     __constraints(__count_args(__VA_ARGS__)));	\
		if (___res)						\
			*___res = (typeof(*___res)){r0, r1, r2, r3};	\
|
||||
#define __fail_smccc_1_1(...) \
|
||||
do { \
|
||||
__declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
|
||||
asm ("" __constraints(__count_args(__VA_ARGS__))); \
|
||||
asm ("" : __constraints(__count_args(__VA_ARGS__))); \
|
||||
if (___res) \
|
||||
___res->a0 = SMCCC_RET_NOT_SUPPORTED; \
|
||||
} while (0)
|
||||
|