KVM: arm64: Parametrize exception entry with a target EL

We currently assume that an exception is always delivered to EL1. Once we
emulate EL2, this will no longer be the case. To prepare for this, add a
target_mode parameter.

While we're at it, merge the computation of the target PC and PSTATE into a
single function that updates both PC and CPSR after saving their previous
values in the corresponding ELR/SPSR. This ensures that they are updated in
the correct order (a pretty common source of bugs...).

Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent 349c330ced
commit d9d7d84d99
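The ordering the message refers to is easy to get wrong when the save and the
update live at separate call sites: the old PC and PSTATE must be copied into
ELR/SPSR before either register is overwritten. A minimal userspace sketch of
the merged pattern (all names here are illustrative stand-ins, not the
kernel's vcpu helpers):

	#include <assert.h>

	/* Illustrative stand-in for the guest register state. */
	struct fake_vcpu {
		unsigned long pc, cpsr;		/* guest PC and PSTATE */
		unsigned long elr, spsr;	/* saved-on-exception copies */
	};

	/*
	 * One function owns the whole sequence, so a careless caller
	 * can never reorder the saves after the updates.
	 */
	static void enter_exception(struct fake_vcpu *v, unsigned long vector,
				    unsigned long new_pstate)
	{
		v->elr = v->pc;		/* stash the old values first... */
		v->spsr = v->cpsr;
		v->pc = vector;		/* ...then install the new ones */
		v->cpsr = new_pstate;
	}

	int main(void)
	{
		struct fake_vcpu v = { .pc = 0x1000, .cpsr = 0x3c5 };

		enter_exception(&v, 0x2000200, 0x3c5);
		assert(v.elr == 0x1000 && v.spsr == 0x3c5);
		return 0;
	}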
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -35,6 +35,7 @@
 #define GIC_PRIO_PSR_I_SET		(1 << 4)
 
 /* Additional SPSR bits not exposed in the UABI */
+#define PSR_MODE_THREAD_BIT	(1 << 0)
 #define PSR_IL_BIT		(1 << 20)
 
 /* AArch32-specific ptrace requests */
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -26,28 +26,12 @@ enum exception_type {
 	except_type_serror	= 0x180,
 };
 
-static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
-{
-	u64 exc_offset;
-
-	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
-	case PSR_MODE_EL1t:
-		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
-		break;
-	case PSR_MODE_EL1h:
-		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
-		break;
-	case PSR_MODE_EL0t:
-		exc_offset = LOWER_EL_AArch64_VECTOR;
-		break;
-	default:
-		exc_offset = LOWER_EL_AArch32_VECTOR;
-	}
-
-	return vcpu_read_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
-}
-
 /*
+ * This performs the exception entry at a given EL (@target_mode), stashing PC
+ * and PSTATE into ELR and SPSR respectively, and compute the new PC/PSTATE.
+ * The EL passed to this function *must* be a non-secure, privileged mode with
+ * bit 0 being set (PSTATE.SP == 1).
+ *
  * When an exception is taken, most PSTATE fields are left unchanged in the
  * handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all
  * of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx
@@ -59,10 +43,35 @@ static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
  * Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from
  * MSB to LSB.
  */
-static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
+static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
+			      enum exception_type type)
 {
-	unsigned long sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
-	unsigned long old, new;
+	unsigned long sctlr, vbar, old, new, mode;
+	u64 exc_offset;
+
+	mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT);
+
+	if (mode == target_mode)
+		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
+	else if ((mode | PSR_MODE_THREAD_BIT) == target_mode)
+		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
+	else if (!(mode & PSR_MODE32_BIT))
+		exc_offset = LOWER_EL_AArch64_VECTOR;
+	else
+		exc_offset = LOWER_EL_AArch32_VECTOR;
+
+	switch (target_mode) {
+	case PSR_MODE_EL1h:
+		vbar = vcpu_read_sys_reg(vcpu, VBAR_EL1);
+		sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
+		vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
+		break;
+	default:
+		/* Don't do that */
+		BUG();
+	}
+
+	*vcpu_pc(vcpu) = vbar + exc_offset + type;
 
 	old = *vcpu_cpsr(vcpu);
 	new = 0;
@@ -105,9 +114,10 @@ static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
 	new |= PSR_I_BIT;
 	new |= PSR_F_BIT;
 
-	new |= PSR_MODE_EL1h;
+	new |= target_mode;
 
-	return new;
+	*vcpu_cpsr(vcpu) = new;
+	vcpu_write_spsr(vcpu, old);
 }
 
 static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
@@ -116,11 +126,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
 	u32 esr = 0;
 
-	vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
-	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
-
-	*vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
-	vcpu_write_spsr(vcpu, cpsr);
+	enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);
 
 	vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
 
@@ -148,14 +154,9 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 
 static void inject_undef64(struct kvm_vcpu *vcpu)
 {
-	unsigned long cpsr = *vcpu_cpsr(vcpu);
 	u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
 
-	vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
-	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
-
-	*vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
-	vcpu_write_spsr(vcpu, cpsr);
+	enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);
 
 	/*
 	 * Build an unknown exception, depending on the instruction
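The four-way vector-offset selection that enter_exception64() gains above is
compact enough to check in isolation. A stand-alone userspace sketch: the
constants mirror the architectural vector layout and PSTATE.M encodings that
the kernel macros stand for, but this is illustrative code, not the kernel's:

	#include <assert.h>

	/* Offsets of the four vector groups within VBAR_ELx. */
	#define CURRENT_EL_SP_EL0_VECTOR	0x000
	#define CURRENT_EL_SP_ELx_VECTOR	0x200
	#define LOWER_EL_AArch64_VECTOR		0x400
	#define LOWER_EL_AArch32_VECTOR		0x600

	/* PSTATE.M encodings (AArch64) and the AArch32 flag bit. */
	#define PSR_MODE_EL0t		0x0
	#define PSR_MODE_EL1t		0x4
	#define PSR_MODE_EL1h		0x5
	#define PSR_MODE_THREAD_BIT	(1 << 0)
	#define PSR_MODE32_BIT		0x10

	/* Userspace copy of the selection logic in the diff above. */
	static unsigned long vector_offset(unsigned long mode,
					   unsigned long target_mode)
	{
		if (mode == target_mode)
			return CURRENT_EL_SP_ELx_VECTOR;	/* same mode, SP_ELx */
		if ((mode | PSR_MODE_THREAD_BIT) == target_mode)
			return CURRENT_EL_SP_EL0_VECTOR;	/* same EL, SP_EL0 */
		if (!(mode & PSR_MODE32_BIT))
			return LOWER_EL_AArch64_VECTOR;		/* lower EL, AArch64 */
		return LOWER_EL_AArch32_VECTOR;			/* lower EL, AArch32 */
	}

	int main(void)
	{
		assert(vector_offset(PSR_MODE_EL1h, PSR_MODE_EL1h) == 0x200);
		assert(vector_offset(PSR_MODE_EL1t, PSR_MODE_EL1h) == 0x000);
		assert(vector_offset(PSR_MODE_EL0t, PSR_MODE_EL1h) == 0x400);
		return 0;
	}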