Merge branch kvm-arm64/selftest/debug into kvmarm-master/next
Guest self-hosted debug tests from Ricardo Koller

* kvm-arm64/selftest/debug:
  KVM: selftests: Add aarch64/debug-exceptions test
  KVM: selftests: Add exception handling support for aarch64
  KVM: selftests: Move GUEST_ASSERT_EQ to utils header
  KVM: selftests: Introduce UCALL_UNHANDLED for unhandled vector reporting
  KVM: selftests: Complete x86_64/sync_regs_test ucall
  KVM: selftests: Rename vm_handle_exception
commit fbba7e69b0
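For context, a minimal sketch of how a test consumes the guest exception-handling API added by this series. It mirrors the flow of aarch64/debug-exceptions.c in the diff below; the brk handler body and the VCPU_ID value are illustrative rather than taken from the merge:

#include <test_util.h>
#include <kvm_util.h>
#include <processor.h>

#define VCPU_ID 0       /* illustrative; tests pick their own vcpu id */

static void guest_brk_handler(struct ex_regs *regs)
{
        /* Skip over the brk instruction so the guest can continue. */
        regs->pc += 4;
}

static void guest_code(void)
{
        asm volatile("brk #0");
        GUEST_DONE();
}

int main(void)
{
        struct ucall uc;
        struct kvm_vm *vm = vm_create_default(VCPU_ID, 0, guest_code);

        ucall_init(vm, NULL);
        vm_init_descriptor_tables(vm);            /* allocate the handler table */
        vcpu_init_descriptor_tables(vm, VCPU_ID); /* point VBAR_EL1 at the vector table */

        /* Synchronous exceptions are dispatched by (vector, ESR_EL1.EC). */
        vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, ESR_EC_BRK_INS,
                                guest_brk_handler);

        vcpu_run(vm, VCPU_ID);
        /* Unhandled vectors would come back as UCALL_UNHANDLED instead. */
        if (get_ucall(vm, VCPU_ID, &uc) != UCALL_DONE)
                TEST_FAIL("Unknown ucall %lu", uc.cmd);

        kvm_vm_free(vm);
        return 0;
}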
tools/testing/selftests/kvm/.gitignore
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0-only
+/aarch64/debug-exceptions
 /aarch64/get-reg-list
 /aarch64/get-reg-list-sve
 /aarch64/vgic_init

tools/testing/selftests/kvm/Makefile
@@ -35,7 +35,7 @@ endif

 LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/rbtree.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c
 LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S
-LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c
+LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c lib/aarch64/handlers.S
 LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c

 TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test
@@ -78,6 +78,7 @@ TEST_GEN_PROGS_x86_64 += memslot_perf_test
 TEST_GEN_PROGS_x86_64 += set_memory_region_test
 TEST_GEN_PROGS_x86_64 += steal_time

+TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
 TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
 TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list-sve
 TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
tools/testing/selftests/kvm/aarch64/debug-exceptions.c (new file, 250 lines)

// SPDX-License-Identifier: GPL-2.0
#include <test_util.h>
#include <kvm_util.h>
#include <processor.h>

#define VCPU_ID 0

#define MDSCR_KDE	(1 << 13)
#define MDSCR_MDE	(1 << 15)
#define MDSCR_SS	(1 << 0)

#define DBGBCR_LEN8	(0xff << 5)
#define DBGBCR_EXEC	(0x0 << 3)
#define DBGBCR_EL1	(0x1 << 1)
#define DBGBCR_E	(0x1 << 0)

#define DBGWCR_LEN8	(0xff << 5)
#define DBGWCR_RD	(0x1 << 3)
#define DBGWCR_WR	(0x2 << 3)
#define DBGWCR_EL1	(0x1 << 1)
#define DBGWCR_E	(0x1 << 0)

#define SPSR_D		(1 << 9)
#define SPSR_SS		(1 << 21)

extern unsigned char sw_bp, hw_bp, bp_svc, bp_brk, hw_wp, ss_start;
static volatile uint64_t sw_bp_addr, hw_bp_addr;
static volatile uint64_t wp_addr, wp_data_addr;
static volatile uint64_t svc_addr;
static volatile uint64_t ss_addr[4], ss_idx;
#define PC(v)  ((uint64_t)&(v))

static void reset_debug_state(void)
{
        asm volatile("msr daifset, #8");

        write_sysreg(osdlr_el1, 0);
        write_sysreg(oslar_el1, 0);
        isb();

        write_sysreg(mdscr_el1, 0);
        /* This test only uses the first bp and wp slot. */
        write_sysreg(dbgbvr0_el1, 0);
        write_sysreg(dbgbcr0_el1, 0);
        write_sysreg(dbgwcr0_el1, 0);
        write_sysreg(dbgwvr0_el1, 0);
        isb();
}

static void install_wp(uint64_t addr)
{
        uint32_t wcr;
        uint32_t mdscr;

        wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E;
        write_sysreg(dbgwcr0_el1, wcr);
        write_sysreg(dbgwvr0_el1, addr);
        isb();

        asm volatile("msr daifclr, #8");

        mdscr = read_sysreg(mdscr_el1) | MDSCR_KDE | MDSCR_MDE;
        write_sysreg(mdscr_el1, mdscr);
        isb();
}

static void install_hw_bp(uint64_t addr)
{
        uint32_t bcr;
        uint32_t mdscr;

        bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E;
        write_sysreg(dbgbcr0_el1, bcr);
        write_sysreg(dbgbvr0_el1, addr);
        isb();

        asm volatile("msr daifclr, #8");

        mdscr = read_sysreg(mdscr_el1) | MDSCR_KDE | MDSCR_MDE;
        write_sysreg(mdscr_el1, mdscr);
        isb();
}

static void install_ss(void)
{
        uint32_t mdscr;

        asm volatile("msr daifclr, #8");

        mdscr = read_sysreg(mdscr_el1) | MDSCR_KDE | MDSCR_SS;
        write_sysreg(mdscr_el1, mdscr);
        isb();
}

static volatile char write_data;

static void guest_code(void)
{
        GUEST_SYNC(0);

        /* Software-breakpoint */
        asm volatile("sw_bp: brk #0");
        GUEST_ASSERT_EQ(sw_bp_addr, PC(sw_bp));

        GUEST_SYNC(1);

        /* Hardware-breakpoint */
        reset_debug_state();
        install_hw_bp(PC(hw_bp));
        asm volatile("hw_bp: nop");
        GUEST_ASSERT_EQ(hw_bp_addr, PC(hw_bp));

        GUEST_SYNC(2);

        /* Hardware-breakpoint + svc */
        reset_debug_state();
        install_hw_bp(PC(bp_svc));
        asm volatile("bp_svc: svc #0");
        GUEST_ASSERT_EQ(hw_bp_addr, PC(bp_svc));
        GUEST_ASSERT_EQ(svc_addr, PC(bp_svc) + 4);

        GUEST_SYNC(3);

        /* Hardware-breakpoint + software-breakpoint */
        reset_debug_state();
        install_hw_bp(PC(bp_brk));
        asm volatile("bp_brk: brk #0");
        GUEST_ASSERT_EQ(sw_bp_addr, PC(bp_brk));
        GUEST_ASSERT_EQ(hw_bp_addr, PC(bp_brk));

        GUEST_SYNC(4);

        /* Watchpoint */
        reset_debug_state();
        install_wp(PC(write_data));
        write_data = 'x';
        GUEST_ASSERT_EQ(write_data, 'x');
        GUEST_ASSERT_EQ(wp_data_addr, PC(write_data));

        GUEST_SYNC(5);

        /* Single-step */
        reset_debug_state();
        install_ss();
        ss_idx = 0;
        asm volatile("ss_start:\n"
                     "mrs x0, esr_el1\n"
                     "add x0, x0, #1\n"
                     "msr daifset, #8\n"
                     : : : "x0");
        GUEST_ASSERT_EQ(ss_addr[0], PC(ss_start));
        GUEST_ASSERT_EQ(ss_addr[1], PC(ss_start) + 4);
        GUEST_ASSERT_EQ(ss_addr[2], PC(ss_start) + 8);

        GUEST_DONE();
}

static void guest_sw_bp_handler(struct ex_regs *regs)
{
        sw_bp_addr = regs->pc;
        regs->pc += 4;
}

static void guest_hw_bp_handler(struct ex_regs *regs)
{
        hw_bp_addr = regs->pc;
        regs->pstate |= SPSR_D;
}

static void guest_wp_handler(struct ex_regs *regs)
{
        wp_data_addr = read_sysreg(far_el1);
        wp_addr = regs->pc;
        regs->pstate |= SPSR_D;
}

static void guest_ss_handler(struct ex_regs *regs)
{
        GUEST_ASSERT_1(ss_idx < 4, ss_idx);
        ss_addr[ss_idx++] = regs->pc;
        regs->pstate |= SPSR_SS;
}

static void guest_svc_handler(struct ex_regs *regs)
{
        svc_addr = regs->pc;
}

static int debug_version(struct kvm_vm *vm)
{
        uint64_t id_aa64dfr0;

        get_reg(vm, VCPU_ID, ARM64_SYS_REG(ID_AA64DFR0_EL1), &id_aa64dfr0);
        return id_aa64dfr0 & 0xf;
}

int main(int argc, char *argv[])
{
        struct kvm_vm *vm;
        struct ucall uc;
        int stage;

        vm = vm_create_default(VCPU_ID, 0, guest_code);
        ucall_init(vm, NULL);

        vm_init_descriptor_tables(vm);
        vcpu_init_descriptor_tables(vm, VCPU_ID);

        if (debug_version(vm) < 6) {
                print_skip("Armv8 debug architecture not supported.");
                kvm_vm_free(vm);
                exit(KSFT_SKIP);
        }

        vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
                                ESR_EC_BRK_INS, guest_sw_bp_handler);
        vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
                                ESR_EC_HW_BP_CURRENT, guest_hw_bp_handler);
        vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
                                ESR_EC_WP_CURRENT, guest_wp_handler);
        vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
                                ESR_EC_SSTEP_CURRENT, guest_ss_handler);
        vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
                                ESR_EC_SVC64, guest_svc_handler);

        for (stage = 0; stage < 7; stage++) {
                vcpu_run(vm, VCPU_ID);

                switch (get_ucall(vm, VCPU_ID, &uc)) {
                case UCALL_SYNC:
                        TEST_ASSERT(uc.args[1] == stage,
                                "Stage %d: Unexpected sync ucall, got %lx",
                                stage, (ulong)uc.args[1]);
                        break;
                case UCALL_ABORT:
                        TEST_FAIL("%s at %s:%ld\n\tvalues: %#lx, %#lx",
                                (const char *)uc.args[0],
                                __FILE__, uc.args[1], uc.args[2], uc.args[3]);
                        break;
                case UCALL_DONE:
                        goto done;
                default:
                        TEST_FAIL("Unknown ucall %lu", uc.cmd);
                }
        }

done:
        kvm_vm_free(vm);
        return 0;
}
tools/testing/selftests/kvm/include/aarch64/processor.h
@@ -8,16 +8,20 @@
 #define SELFTEST_KVM_PROCESSOR_H

 #include "kvm_util.h"
+#include <linux/stringify.h>


 #define ARM64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                            KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

-#define CPACR_EL1       3, 0, 1, 0, 2
-#define TCR_EL1         3, 0, 2, 0, 2
-#define MAIR_EL1        3, 0, 10, 2, 0
-#define TTBR0_EL1       3, 0, 2, 0, 0
-#define SCTLR_EL1       3, 0, 1, 0, 0
+#define CPACR_EL1               3, 0, 1, 0, 2
+#define TCR_EL1                 3, 0, 2, 0, 2
+#define MAIR_EL1                3, 0, 10, 2, 0
+#define TTBR0_EL1               3, 0, 2, 0, 0
+#define SCTLR_EL1               3, 0, 1, 0, 0
+#define VBAR_EL1                3, 0, 12, 0, 0
+
+#define ID_AA64DFR0_EL1         3, 0, 0, 5, 0

 /*
  * Default MAIR
@@ -56,4 +60,73 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, int vcpuid, struct kvm_vcpu_init *ini
 void aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid,
                               struct kvm_vcpu_init *init, void *guest_code);

+struct ex_regs {
+        u64 regs[31];
+        u64 sp;
+        u64 pc;
+        u64 pstate;
+};
+
+#define VECTOR_NUM      16
+
+enum {
+        VECTOR_SYNC_CURRENT_SP0,
+        VECTOR_IRQ_CURRENT_SP0,
+        VECTOR_FIQ_CURRENT_SP0,
+        VECTOR_ERROR_CURRENT_SP0,
+
+        VECTOR_SYNC_CURRENT,
+        VECTOR_IRQ_CURRENT,
+        VECTOR_FIQ_CURRENT,
+        VECTOR_ERROR_CURRENT,
+
+        VECTOR_SYNC_LOWER_64,
+        VECTOR_IRQ_LOWER_64,
+        VECTOR_FIQ_LOWER_64,
+        VECTOR_ERROR_LOWER_64,
+
+        VECTOR_SYNC_LOWER_32,
+        VECTOR_IRQ_LOWER_32,
+        VECTOR_FIQ_LOWER_32,
+        VECTOR_ERROR_LOWER_32,
+};
+
+#define VECTOR_IS_SYNC(v) ((v) == VECTOR_SYNC_CURRENT_SP0 || \
+                           (v) == VECTOR_SYNC_CURRENT     || \
+                           (v) == VECTOR_SYNC_LOWER_64    || \
+                           (v) == VECTOR_SYNC_LOWER_32)
+
+#define ESR_EC_NUM              64
+#define ESR_EC_SHIFT            26
+#define ESR_EC_MASK             (ESR_EC_NUM - 1)
+
+#define ESR_EC_SVC64            0x15
+#define ESR_EC_HW_BP_CURRENT    0x31
+#define ESR_EC_SSTEP_CURRENT    0x33
+#define ESR_EC_WP_CURRENT       0x35
+#define ESR_EC_BRK_INS          0x3c
+
+void vm_init_descriptor_tables(struct kvm_vm *vm);
+void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid);
+
+typedef void(*handler_fn)(struct ex_regs *);
+void vm_install_exception_handler(struct kvm_vm *vm,
+                int vector, handler_fn handler);
+void vm_install_sync_handler(struct kvm_vm *vm,
+                int vector, int ec, handler_fn handler);
+
+#define write_sysreg(reg, val)                                            \
+({                                                                        \
+        u64 __val = (u64)(val);                                           \
+        asm volatile("msr " __stringify(reg) ", %x0" : : "rZ" (__val));   \
+})
+
+#define read_sysreg(reg)                                                  \
+({      u64 val;                                                          \
+        asm volatile("mrs %0, "__stringify(reg) : "=r"(val) : : "memory");\
+        val;                                                              \
+})
+
+#define isb()           asm volatile("isb" : : : "memory")
+
 #endif /* SELFTEST_KVM_PROCESSOR_H */
tools/testing/selftests/kvm/include/kvm_util.h
@@ -349,6 +349,7 @@ enum {
         UCALL_SYNC,
         UCALL_ABORT,
         UCALL_DONE,
+        UCALL_UNHANDLED,
 };

 #define UCALL_MAX_ARGS 6
@@ -367,26 +368,28 @@ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc);
                                 ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4)
 #define GUEST_SYNC(stage)       ucall(UCALL_SYNC, 2, "hello", stage)
 #define GUEST_DONE()            ucall(UCALL_DONE, 0)
-#define __GUEST_ASSERT(_condition, _nargs, _args...) do {       \
-        if (!(_condition))                                      \
-                ucall(UCALL_ABORT, 2 + _nargs,                  \
-                      "Failed guest assert: "                   \
-                      #_condition, __LINE__, _args);            \
+#define __GUEST_ASSERT(_condition, _condstr, _nargs, _args...) do { \
+        if (!(_condition))                                          \
+                ucall(UCALL_ABORT, 2 + _nargs,                      \
+                      "Failed guest assert: "                       \
+                      _condstr, __LINE__, _args);                   \
 } while (0)

 #define GUEST_ASSERT(_condition) \
-        __GUEST_ASSERT((_condition), 0, 0)
+        __GUEST_ASSERT(_condition, #_condition, 0, 0)

 #define GUEST_ASSERT_1(_condition, arg1) \
-        __GUEST_ASSERT((_condition), 1, (arg1))
+        __GUEST_ASSERT(_condition, #_condition, 1, (arg1))

 #define GUEST_ASSERT_2(_condition, arg1, arg2) \
-        __GUEST_ASSERT((_condition), 2, (arg1), (arg2))
+        __GUEST_ASSERT(_condition, #_condition, 2, (arg1), (arg2))

 #define GUEST_ASSERT_3(_condition, arg1, arg2, arg3) \
-        __GUEST_ASSERT((_condition), 3, (arg1), (arg2), (arg3))
+        __GUEST_ASSERT(_condition, #_condition, 3, (arg1), (arg2), (arg3))

 #define GUEST_ASSERT_4(_condition, arg1, arg2, arg3, arg4) \
-        __GUEST_ASSERT((_condition), 4, (arg1), (arg2), (arg3), (arg4))
+        __GUEST_ASSERT(_condition, #_condition, 4, (arg1), (arg2), (arg3), (arg4))
+
+#define GUEST_ASSERT_EQ(a, b) __GUEST_ASSERT((a) == (b), #a " == " #b, 2, a, b)

 #endif /* SELFTEST_KVM_UTIL_H */
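A small usage sketch of the reworked assert macros above; the guest_code and report_abort helpers are illustrative, not part of the merge. GUEST_ASSERT_EQ() now stringifies its operands itself, so a failing check carries the condition text and both values through UCALL_ABORT, which the host decodes the same way aarch64/debug-exceptions.c does:

/* Guest side (illustrative values). */
static void guest_code(void)
{
        uint64_t expected = 42;
        uint64_t actual = 42;

        /* On failure this issues UCALL_ABORT with the stringified
         * condition "actual == expected", __LINE__, and both values
         * in args[2] and args[3]. */
        GUEST_ASSERT_EQ(actual, expected);
        GUEST_DONE();
}

/* Host side: report the abort like debug-exceptions.c does. */
static void report_abort(struct ucall *uc)
{
        TEST_FAIL("%s at %s:%ld\n\tvalues: %#lx, %#lx",
                  (const char *)uc->args[0], __FILE__,
                  uc->args[1], uc->args[2], uc->args[3]);
}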
tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -53,8 +53,6 @@
 #define CPUID_PKU (1ul << 3)
 #define CPUID_LA57 (1ul << 16)

-#define UNEXPECTED_VECTOR_PORT 0xfff0u
-
 /* General Registers in 64-Bit Mode */
 struct gpr64_regs {
         u64 rax;
@@ -391,7 +389,7 @@ struct ex_regs {

 void vm_init_descriptor_tables(struct kvm_vm *vm);
 void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid);
-void vm_handle_exception(struct kvm_vm *vm, int vector,
+void vm_install_exception_handler(struct kvm_vm *vm, int vector,
                         void (*handler)(struct ex_regs *));

 /*
tools/testing/selftests/kvm/lib/aarch64/handlers.S (new file, 126 lines)

/* SPDX-License-Identifier: GPL-2.0 */
.macro save_registers
        add sp, sp, #-16 * 17

        stp x0, x1, [sp, #16 * 0]
        stp x2, x3, [sp, #16 * 1]
        stp x4, x5, [sp, #16 * 2]
        stp x6, x7, [sp, #16 * 3]
        stp x8, x9, [sp, #16 * 4]
        stp x10, x11, [sp, #16 * 5]
        stp x12, x13, [sp, #16 * 6]
        stp x14, x15, [sp, #16 * 7]
        stp x16, x17, [sp, #16 * 8]
        stp x18, x19, [sp, #16 * 9]
        stp x20, x21, [sp, #16 * 10]
        stp x22, x23, [sp, #16 * 11]
        stp x24, x25, [sp, #16 * 12]
        stp x26, x27, [sp, #16 * 13]
        stp x28, x29, [sp, #16 * 14]

        /*
         * This stores sp_el1 into ex_regs.sp so exception handlers can "look"
         * at it. It will _not_ be used to restore the sp on return from the
         * exception so handlers can not update it.
         */
        add x1, sp, #16 * 17
        stp x30, x1, [sp, #16 * 15] /* x30, SP */

        mrs x1, elr_el1
        mrs x2, spsr_el1
        stp x1, x2, [sp, #16 * 16] /* PC, PSTATE */
.endm

.macro restore_registers
        ldp x1, x2, [sp, #16 * 16] /* PC, PSTATE */
        msr elr_el1, x1
        msr spsr_el1, x2

        /* sp is not restored */
        ldp x30, xzr, [sp, #16 * 15] /* x30, SP */

        ldp x28, x29, [sp, #16 * 14]
        ldp x26, x27, [sp, #16 * 13]
        ldp x24, x25, [sp, #16 * 12]
        ldp x22, x23, [sp, #16 * 11]
        ldp x20, x21, [sp, #16 * 10]
        ldp x18, x19, [sp, #16 * 9]
        ldp x16, x17, [sp, #16 * 8]
        ldp x14, x15, [sp, #16 * 7]
        ldp x12, x13, [sp, #16 * 6]
        ldp x10, x11, [sp, #16 * 5]
        ldp x8, x9, [sp, #16 * 4]
        ldp x6, x7, [sp, #16 * 3]
        ldp x4, x5, [sp, #16 * 2]
        ldp x2, x3, [sp, #16 * 1]
        ldp x0, x1, [sp, #16 * 0]

        add sp, sp, #16 * 17

        eret
.endm

.pushsection ".entry.text", "ax"
.balign 0x800
.global vectors
vectors:
.popsection

.set vector, 0

/*
 * Build an exception handler for vector and append a jump to it into
 * vectors (while making sure that it's 0x80 aligned).
 */
.macro HANDLER, label
handler_\label:
        save_registers
        mov x0, sp
        mov x1, #vector
        bl route_exception
        restore_registers

.pushsection ".entry.text", "ax"
.balign 0x80
        b handler_\label
.popsection

.set vector, vector + 1
.endm

.macro HANDLER_INVALID
.pushsection ".entry.text", "ax"
.balign 0x80
/* This will abort so no need to save and restore registers. */
        mov x0, #vector
        mov x1, #0 /* ec */
        mov x2, #0 /* valid_ec */
        b kvm_exit_unexpected_exception
.popsection

.set vector, vector + 1
.endm

/*
 * Caution: be sure to not add anything between the declaration of vectors
 * above and these macro calls that will build the vectors table below it.
 */
HANDLER_INVALID                         // Synchronous EL1t
HANDLER_INVALID                         // IRQ EL1t
HANDLER_INVALID                         // FIQ EL1t
HANDLER_INVALID                         // Error EL1t

HANDLER el1h_sync                       // Synchronous EL1h
HANDLER el1h_irq                        // IRQ EL1h
HANDLER el1h_fiq                        // FIQ EL1h
HANDLER el1h_error                      // Error EL1h

HANDLER el0_sync_64                     // Synchronous 64-bit EL0
HANDLER el0_irq_64                      // IRQ 64-bit EL0
HANDLER el0_fiq_64                      // FIQ 64-bit EL0
HANDLER el0_error_64                    // Error 64-bit EL0

HANDLER el0_sync_32                     // Synchronous 32-bit EL0
HANDLER el0_irq_32                      // IRQ 32-bit EL0
HANDLER el0_fiq_32                      // FIQ 32-bit EL0
HANDLER el0_error_32                    // Error 32-bit EL0
tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -6,6 +6,7 @@
  */

 #include <linux/compiler.h>
+#include <assert.h>

 #include "kvm_util.h"
 #include "../kvm_util_internal.h"
@@ -14,6 +15,8 @@
 #define KVM_GUEST_PAGE_TABLE_MIN_PADDR          0x180000
 #define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN     0xac0000

+static vm_vaddr_t exception_handlers;
+
 static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
 {
         return (v + vm->page_size) & ~(vm->page_size - 1);
@@ -334,6 +337,100 @@ void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
         va_end(ap);
 }

+void kvm_exit_unexpected_exception(int vector, uint64_t ec, bool valid_ec)
+{
+        ucall(UCALL_UNHANDLED, 3, vector, ec, valid_ec);
+        while (1)
+                ;
+}
+
+void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
+{
+        struct ucall uc;
+
+        if (get_ucall(vm, vcpuid, &uc) != UCALL_UNHANDLED)
+                return;
+
+        if (uc.args[2]) /* valid_ec */ {
+                assert(VECTOR_IS_SYNC(uc.args[0]));
+                TEST_FAIL("Unexpected exception (vector:0x%lx, ec:0x%lx)",
+                          uc.args[0], uc.args[1]);
+        } else {
+                assert(!VECTOR_IS_SYNC(uc.args[0]));
+                TEST_FAIL("Unexpected exception (vector:0x%lx)",
+                          uc.args[0]);
+        }
+}
+
+struct handlers {
+        handler_fn exception_handlers[VECTOR_NUM][ESR_EC_NUM];
+};
+
+void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid)
+{
+        extern char vectors;
+
+        set_reg(vm, vcpuid, ARM64_SYS_REG(VBAR_EL1), (uint64_t)&vectors);
+}
+
+void route_exception(struct ex_regs *regs, int vector)
+{
+        struct handlers *handlers = (struct handlers *)exception_handlers;
+        bool valid_ec;
+        int ec = 0;
+
+        switch (vector) {
+        case VECTOR_SYNC_CURRENT:
+        case VECTOR_SYNC_LOWER_64:
+                ec = (read_sysreg(esr_el1) >> ESR_EC_SHIFT) & ESR_EC_MASK;
+                valid_ec = true;
+                break;
+        case VECTOR_IRQ_CURRENT:
+        case VECTOR_IRQ_LOWER_64:
+        case VECTOR_FIQ_CURRENT:
+        case VECTOR_FIQ_LOWER_64:
+        case VECTOR_ERROR_CURRENT:
+        case VECTOR_ERROR_LOWER_64:
+                ec = 0;
+                valid_ec = false;
+                break;
+        default:
+                valid_ec = false;
+                goto unexpected_exception;
+        }
+
+        if (handlers && handlers->exception_handlers[vector][ec])
+                return handlers->exception_handlers[vector][ec](regs);
+
+unexpected_exception:
+        kvm_exit_unexpected_exception(vector, ec, valid_ec);
+}
+
+void vm_init_descriptor_tables(struct kvm_vm *vm)
+{
+        vm->handlers = vm_vaddr_alloc(vm, sizeof(struct handlers),
+                        vm->page_size, 0, 0);
+
+        *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
+}
+
+void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec,
+                        void (*handler)(struct ex_regs *))
+{
+        struct handlers *handlers = addr_gva2hva(vm, vm->handlers);
+
+        assert(VECTOR_IS_SYNC(vector));
+        assert(vector < VECTOR_NUM);
+        assert(ec < ESR_EC_NUM);
+        handlers->exception_handlers[vector][ec] = handler;
+}
+
+void vm_install_exception_handler(struct kvm_vm *vm, int vector,
+                        void (*handler)(struct ex_regs *))
+{
+        struct handlers *handlers = addr_gva2hva(vm, vm->handlers);
+
+        assert(!VECTOR_IS_SYNC(vector));
+        assert(vector < VECTOR_NUM);
+        handlers->exception_handlers[vector][0] = handler;
+}
tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -1201,7 +1201,7 @@ static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,

 void kvm_exit_unexpected_vector(uint32_t value)
 {
-        outl(UNEXPECTED_VECTOR_PORT, value);
+        ucall(UCALL_UNHANDLED, 1, value);
 }

 void route_exception(struct ex_regs *regs)
@@ -1244,8 +1244,8 @@ void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid)
         *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
 }

-void vm_handle_exception(struct kvm_vm *vm, int vector,
-                        void (*handler)(struct ex_regs *))
+void vm_install_exception_handler(struct kvm_vm *vm, int vector,
+                        void (*handler)(struct ex_regs *))
 {
         vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers);

@@ -1254,16 +1254,13 @@ void vm_handle_exception(struct kvm_vm *vm, int vector,

 void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
 {
-        if (vcpu_state(vm, vcpuid)->exit_reason == KVM_EXIT_IO
-                && vcpu_state(vm, vcpuid)->io.port == UNEXPECTED_VECTOR_PORT
-                && vcpu_state(vm, vcpuid)->io.size == 4) {
-                /* Grab pointer to io data */
-                uint32_t *data = (void *)vcpu_state(vm, vcpuid)
-                        + vcpu_state(vm, vcpuid)->io.data_offset;
+        struct ucall uc;

-                TEST_ASSERT(false,
-                            "Unexpected vectored event in guest (vector:0x%x)",
-                            *data);
+        if (get_ucall(vm, vcpuid, &uc) == UCALL_UNHANDLED) {
+                uint64_t vector = uc.args[0];
+
+                TEST_FAIL("Unexpected vectored event in guest (vector:0x%lx)",
+                          vector);
         }
 }
tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -154,8 +154,8 @@ int main(int argc, char *argv[])

         vm_init_descriptor_tables(vm);
         vcpu_init_descriptor_tables(vm, VCPU_ID);
-        vm_handle_exception(vm, UD_VECTOR, guest_ud_handler);
-        vm_handle_exception(vm, NMI_VECTOR, guest_nmi_handler);
+        vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
+        vm_install_exception_handler(vm, NMI_VECTOR, guest_nmi_handler);

         pr_info("Running L1 which uses EVMCS to run L2\n");

tools/testing/selftests/kvm/x86_64/kvm_pv_test.c
@@ -227,7 +227,7 @@ int main(void)

         vm_init_descriptor_tables(vm);
         vcpu_init_descriptor_tables(vm, VCPU_ID);
-        vm_handle_exception(vm, GP_VECTOR, guest_gp_handler);
+        vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);

         enter_guest(vm);
         kvm_vm_free(vm);
tools/testing/selftests/kvm/x86_64/sync_regs_test.c
@@ -24,6 +24,10 @@

 #define UCALL_PIO_PORT ((uint16_t)0x1000)

+struct ucall uc_none = {
+        .cmd = UCALL_NONE,
+};
+
 /*
  * ucall is embedded here to protect against compiler reshuffling registers
  * before calling a function. In this test we only need to get KVM_EXIT_IO
@@ -34,7 +38,8 @@ void guest_code(void)
         asm volatile("1: in %[port], %%al\n"
                      "add $0x1, %%rbx\n"
                      "jmp 1b"
-                     : : [port] "d" (UCALL_PIO_PORT) : "rax", "rbx");
+                     : : [port] "d" (UCALL_PIO_PORT), "D" (&uc_none)
+                     : "rax", "rbx");
 }

 static void compare_regs(struct kvm_regs *left, struct kvm_regs *right)
tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c
@@ -18,15 +18,6 @@
 #define rounded_rdmsr(x)        ROUND(rdmsr(x))
 #define rounded_host_rdmsr(x)   ROUND(vcpu_get_msr(vm, 0, x))

-#define GUEST_ASSERT_EQ(a, b) do {                              \
-        __typeof(a) _a = (a);                                   \
-        __typeof(b) _b = (b);                                   \
-        if (_a != _b)                                           \
-                ucall(UCALL_ABORT, 4,                           \
-                      "Failed guest assert: "                   \
-                      #a " == " #b, __LINE__, _a, _b);          \
-} while(0)
-
 static void guest_code(void)
 {
         u64 val = 0;
tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c
@@ -574,7 +574,7 @@ static void test_msr_filter_allow(void) {
         vm_init_descriptor_tables(vm);
         vcpu_init_descriptor_tables(vm, VCPU_ID);

-        vm_handle_exception(vm, GP_VECTOR, guest_gp_handler);
+        vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);

         /* Process guest code userspace exits. */
         run_guest_then_process_rdmsr(vm, MSR_IA32_XSS);
@@ -588,12 +588,12 @@ static void test_msr_filter_allow(void) {
         run_guest_then_process_wrmsr(vm, MSR_NON_EXISTENT);
         run_guest_then_process_rdmsr(vm, MSR_NON_EXISTENT);

-        vm_handle_exception(vm, UD_VECTOR, guest_ud_handler);
+        vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
         run_guest(vm);
-        vm_handle_exception(vm, UD_VECTOR, NULL);
+        vm_install_exception_handler(vm, UD_VECTOR, NULL);

         if (process_ucall(vm) != UCALL_DONE) {
-                vm_handle_exception(vm, GP_VECTOR, guest_fep_gp_handler);
+                vm_install_exception_handler(vm, GP_VECTOR, guest_fep_gp_handler);

                 /* Process emulated rdmsr and wrmsr instructions. */
                 run_guest_then_process_rdmsr(vm, MSR_IA32_XSS);
tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c
@@ -462,7 +462,7 @@ int main(int argc, char *argv[])

         vm_init_descriptor_tables(vm);
         vcpu_init_descriptor_tables(vm, HALTER_VCPU_ID);
-        vm_handle_exception(vm, IPI_VECTOR, guest_ipi_handler);
+        vm_install_exception_handler(vm, IPI_VECTOR, guest_ipi_handler);

         virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA, 0);