selftests: kvm: test enforcement of paravirtual cpuid features
Add a set of tests that ensure the guest cannot access paravirtual msrs and
hypercalls that have been disabled in the KVM_CPUID_FEATURES leaf. Expect a
#GP in the case of msr accesses and -KVM_ENOSYS from hypercalls.

Cc: Jim Mattson <jmattson@google.com>
Signed-off-by: Oliver Upton <oupton@google.com>
Reviewed-by: Peter Shier <pshier@google.com>
Reviewed-by: Aaron Lewis <aaronlewis@google.com>
Message-Id: <20201027231044.655110-7-oupton@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 29faeb9632
commit ac4a4d6de2
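
The capability exercised here, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, is enabled per
vCPU with KVM_ENABLE_CAP; the selftest below goes through the kvm_util wrappers
for that. Not part of this patch: a minimal raw-ioctl sketch of the same step,
assuming a kernel and <linux/kvm.h> that already define the capability.

/* Sketch only: enable KVM_CAP_ENFORCE_PV_FEATURE_CPUID on a fresh vCPU
 * using raw KVM ioctls. Error handling is kept minimal on purpose.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
        struct kvm_enable_cap cap;
        int kvm, vm, vcpu;

        kvm = open("/dev/kvm", O_RDWR);
        if (kvm < 0 ||
            !ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ENFORCE_PV_FEATURE_CPUID)) {
                fprintf(stderr, "KVM_CAP_ENFORCE_PV_FEATURE_CPUID unsupported\n");
                return 0;
        }

        vm = ioctl(kvm, KVM_CREATE_VM, 0);      /* default machine type */
        vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);   /* vCPU id 0 */

        memset(&cap, 0, sizeof(cap));
        cap.cap = KVM_CAP_ENFORCE_PV_FEATURE_CPUID;
        cap.args[0] = 1;                        /* 1 = enforce the PV CPUID bits */
        return ioctl(vcpu, KVM_ENABLE_CAP, &cap) ? 1 : 0;
}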

tools/testing/selftests/kvm/.gitignore
@@ -5,6 +5,7 @@
/x86_64/cr4_cpuid_sync_test
/x86_64/debug_regs
/x86_64/evmcs_test
/x86_64/kvm_pv_test
/x86_64/hyperv_cpuid
/x86_64/mmio_warning_test
/x86_64/platform_info_test

tools/testing/selftests/kvm/Makefile

@@ -41,6 +41,7 @@ LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c
TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test
TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
TEST_GEN_PROGS_x86_64 += x86_64/kvm_pv_test
TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test

tools/testing/selftests/kvm/include/kvm_util.h

@@ -63,6 +63,9 @@ enum vm_mem_backing_src_type {

int kvm_check_cap(long cap);
int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
                    struct kvm_enable_cap *cap);
void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);

struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm);
struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm);

tools/testing/selftests/kvm/include/x86_64/processor.h

@@ -362,6 +362,18 @@ void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid);
void vm_handle_exception(struct kvm_vm *vm, int vector,
                         void (*handler)(struct ex_regs *));

/*
 * set_cpuid() - overwrites a matching cpuid entry with the provided value.
 *               matches based on ent->function && ent->index. returns true
 *               if a match was found and successfully overwritten.
 * @cpuid: the kvm cpuid list to modify.
 * @ent: cpuid entry to insert
 */
bool set_cpuid(struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 *ent);

uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
                       uint64_t a3);

/*
 * Basic CPU control in CR0
 */
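
set_cpuid() replaces an entire matching entry, which is how the new test blanks
the KVM_CPUID_FEATURES leaf. Not part of this patch: a hedged sketch that
instead clears a single feature bit, assuming the existing selftest helpers
kvm_get_supported_cpuid() and vcpu_set_cpuid() plus the KVM_FEATURE_*
definitions from <asm/kvm_para.h>.

/* Illustrative only: drop KVM_FEATURE_PV_SCHED_YIELD from KVM_CPUID_FEATURES
 * rather than zapping the whole leaf, then install the modified CPUID.
 */
static void clear_pv_sched_yield(struct kvm_vm *vm, uint32_t vcpu_id)
{
        struct kvm_cpuid2 *cpuid = kvm_get_supported_cpuid();
        struct kvm_cpuid_entry2 ent = {0};
        int i;

        /* Start from the existing KVM_CPUID_FEATURES entry, if present. */
        for (i = 0; i < cpuid->nent; i++) {
                if (cpuid->entries[i].function == KVM_CPUID_FEATURES)
                        ent = cpuid->entries[i];
        }

        ent.function = KVM_CPUID_FEATURES;
        ent.eax &= ~(1u << KVM_FEATURE_PV_SCHED_YIELD);

        TEST_ASSERT(set_cpuid(cpuid, &ent),
                    "KVM_CPUID_FEATURES leaf not found");
        vcpu_set_cpuid(vm, vcpu_id, cpuid);
}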

tools/testing/selftests/kvm/lib/kvm_util.c

@@ -86,6 +86,34 @@ int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
        return ret;
}

/* VCPU Enable Capability
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpu_id - VCPU
 *   cap - Capability
 *
 * Output Args: None
 *
 * Return: On success, 0. On failure a TEST_ASSERT failure is produced.
 *
 * Enables a capability (KVM_CAP_*) on the VCPU.
 */
int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
                    struct kvm_enable_cap *cap)
{
        struct vcpu *vcpu = vcpu_find(vm, vcpu_id);
        int r;

        TEST_ASSERT(vcpu, "cannot find vcpu %d", vcpu_id);

        r = ioctl(vcpu->fd, KVM_ENABLE_CAP, cap);
        TEST_ASSERT(!r, "KVM_ENABLE_CAP vCPU ioctl failed,\n"
                    " rc: %i, errno: %i", r, errno);

        return r;
}

static void vm_open(struct kvm_vm *vm, int perm)
{
        vm->kvm_fd = open(KVM_DEV_PATH, perm);
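
The new helper mirrors vm_enable_cap() but issues KVM_ENABLE_CAP against a
single vCPU's file descriptor. Not part of this patch: a small usage sketch;
the test's main() further down does the same thing inline.

/* Sketch: turn on PV feature enforcement for one vCPU of an existing VM,
 * using the helper added above. Assumes kvm_util.h and <linux/kvm.h>.
 */
static void enforce_pv_cpuid(struct kvm_vm *vm, uint32_t vcpu_id)
{
        struct kvm_enable_cap cap = {
                .cap = KVM_CAP_ENFORCE_PV_FEATURE_CPUID,
                .args = { 1 },  /* args[0] = 1 enables enforcement */
        };

        vcpu_enable_cap(vm, vcpu_id, &cap);
}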

tools/testing/selftests/kvm/lib/x86_64/processor.c

@@ -1227,3 +1227,32 @@ void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
                            *data);
        }
}

bool set_cpuid(struct kvm_cpuid2 *cpuid,
               struct kvm_cpuid_entry2 *ent)
{
        int i;

        for (i = 0; i < cpuid->nent; i++) {
                struct kvm_cpuid_entry2 *cur = &cpuid->entries[i];

                if (cur->function != ent->function || cur->index != ent->index)
                        continue;

                memcpy(cur, ent, sizeof(struct kvm_cpuid_entry2));
                return true;
        }

        return false;
}

uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
                       uint64_t a3)
{
        uint64_t r;

        asm volatile("vmcall"
                     : "=a"(r)
                     : "a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3));
        return r;
}

tools/testing/selftests/kvm/x86_64/kvm_pv_test.c (new file)
@@ -0,0 +1,234 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020, Google LLC.
 *
 * Tests for KVM paravirtual feature disablement
 */
#include <asm/kvm_para.h>
#include <linux/kvm_para.h>
#include <stdint.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

extern unsigned char rdmsr_start;
extern unsigned char rdmsr_end;

static u64 do_rdmsr(u32 idx)
{
        u32 lo, hi;

        asm volatile("rdmsr_start: rdmsr;"
                     "rdmsr_end:"
                     : "=a"(lo), "=d"(hi)
                     : "c"(idx));

        return (((u64) hi) << 32) | lo;
}

extern unsigned char wrmsr_start;
extern unsigned char wrmsr_end;

static void do_wrmsr(u32 idx, u64 val)
{
        u32 lo, hi;

        lo = val;
        hi = val >> 32;

        asm volatile("wrmsr_start: wrmsr;"
                     "wrmsr_end:"
                     : : "a"(lo), "c"(idx), "d"(hi));
}

static int nr_gp;

static void guest_gp_handler(struct ex_regs *regs)
{
        unsigned char *rip = (unsigned char *)regs->rip;
        bool r, w;

        r = rip == &rdmsr_start;
        w = rip == &wrmsr_start;
        GUEST_ASSERT(r || w);

        nr_gp++;

        if (r)
                regs->rip = (uint64_t)&rdmsr_end;
        else
                regs->rip = (uint64_t)&wrmsr_end;
}

struct msr_data {
        uint32_t idx;
        const char *name;
};

#define TEST_MSR(msr) { .idx = msr, .name = #msr }
#define UCALL_PR_MSR 0xdeadbeef
#define PR_MSR(msr) ucall(UCALL_PR_MSR, 1, msr)

/*
 * KVM paravirtual msrs to test. Expect a #GP if any of these msrs are read or
 * written, as the KVM_CPUID_FEATURES leaf is cleared.
 */
static struct msr_data msrs_to_test[] = {
        TEST_MSR(MSR_KVM_SYSTEM_TIME),
        TEST_MSR(MSR_KVM_SYSTEM_TIME_NEW),
        TEST_MSR(MSR_KVM_WALL_CLOCK),
        TEST_MSR(MSR_KVM_WALL_CLOCK_NEW),
        TEST_MSR(MSR_KVM_ASYNC_PF_EN),
        TEST_MSR(MSR_KVM_STEAL_TIME),
        TEST_MSR(MSR_KVM_PV_EOI_EN),
        TEST_MSR(MSR_KVM_POLL_CONTROL),
        TEST_MSR(MSR_KVM_ASYNC_PF_INT),
        TEST_MSR(MSR_KVM_ASYNC_PF_ACK),
};

static void test_msr(struct msr_data *msr)
{
        PR_MSR(msr);
        do_rdmsr(msr->idx);
        GUEST_ASSERT(READ_ONCE(nr_gp) == 1);

        nr_gp = 0;
        do_wrmsr(msr->idx, 0);
        GUEST_ASSERT(READ_ONCE(nr_gp) == 1);
        nr_gp = 0;
}

struct hcall_data {
        uint64_t nr;
        const char *name;
};

#define TEST_HCALL(hc) { .nr = hc, .name = #hc }
#define UCALL_PR_HCALL 0xdeadc0de
#define PR_HCALL(hc) ucall(UCALL_PR_HCALL, 1, hc)

/*
 * KVM hypercalls to test. Expect -KVM_ENOSYS when called, as the corresponding
 * features have been cleared in KVM_CPUID_FEATURES.
 */
static struct hcall_data hcalls_to_test[] = {
        TEST_HCALL(KVM_HC_KICK_CPU),
        TEST_HCALL(KVM_HC_SEND_IPI),
        TEST_HCALL(KVM_HC_SCHED_YIELD),
};

static void test_hcall(struct hcall_data *hc)
{
        uint64_t r;

        PR_HCALL(hc);
        r = kvm_hypercall(hc->nr, 0, 0, 0, 0);
        GUEST_ASSERT(r == -KVM_ENOSYS);
}

static void guest_main(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(msrs_to_test); i++) {
                test_msr(&msrs_to_test[i]);
        }

        for (i = 0; i < ARRAY_SIZE(hcalls_to_test); i++) {
                test_hcall(&hcalls_to_test[i]);
        }

        GUEST_DONE();
}

static void clear_kvm_cpuid_features(struct kvm_cpuid2 *cpuid)
{
        struct kvm_cpuid_entry2 ent = {0};

        ent.function = KVM_CPUID_FEATURES;
        TEST_ASSERT(set_cpuid(cpuid, &ent),
                    "failed to clear KVM_CPUID_FEATURES leaf");
}

static void pr_msr(struct ucall *uc)
{
        struct msr_data *msr = (struct msr_data *)uc->args[0];

        pr_info("testing msr: %s (%#x)\n", msr->name, msr->idx);
}

static void pr_hcall(struct ucall *uc)
{
        struct hcall_data *hc = (struct hcall_data *)uc->args[0];

        pr_info("testing hcall: %s (%lu)\n", hc->name, hc->nr);
}

static void handle_abort(struct ucall *uc)
{
        TEST_FAIL("%s at %s:%ld", (const char *)uc->args[0],
                  __FILE__, uc->args[1]);
}

#define VCPU_ID 0

static void enter_guest(struct kvm_vm *vm)
{
        struct kvm_run *run;
        struct ucall uc;
        int r;

        run = vcpu_state(vm, VCPU_ID);

        while (true) {
                r = _vcpu_run(vm, VCPU_ID);
                TEST_ASSERT(!r, "vcpu_run failed: %d\n", r);
                TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
                            "unexpected exit reason: %u (%s)",
                            run->exit_reason, exit_reason_str(run->exit_reason));

                switch (get_ucall(vm, VCPU_ID, &uc)) {
                case UCALL_PR_MSR:
                        pr_msr(&uc);
                        break;
                case UCALL_PR_HCALL:
                        pr_hcall(&uc);
                        break;
                case UCALL_ABORT:
                        handle_abort(&uc);
                        return;
                case UCALL_DONE:
                        return;
                }
        }
}

int main(void)
{
        struct kvm_enable_cap cap = {0};
        struct kvm_cpuid2 *best;
        struct kvm_vm *vm;

        if (!kvm_check_cap(KVM_CAP_ENFORCE_PV_FEATURE_CPUID)) {
                pr_info("will skip kvm paravirt restriction tests.\n");
                return 0;
        }

        vm = vm_create_default(VCPU_ID, 0, guest_main);

        cap.cap = KVM_CAP_ENFORCE_PV_FEATURE_CPUID;
        cap.args[0] = 1;
        vcpu_enable_cap(vm, VCPU_ID, &cap);

        best = kvm_get_supported_cpuid();
        clear_kvm_cpuid_features(best);
        vcpu_set_cpuid(vm, VCPU_ID, best);

        vm_init_descriptor_tables(vm);
        vcpu_init_descriptor_tables(vm, VCPU_ID);
        vm_handle_exception(vm, GP_VECTOR, guest_gp_handler);

        enter_guest(vm);
        kvm_vm_free(vm);
}
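
The test intentionally touches MSRs and hypercalls the vCPU has been told it
does not have. For contrast, and not part of this patch, a rough guest-side
sketch of the cooperative pattern: probe KVM_CPUID_FEATURES (CPUID leaf
0x40000001) before issuing a hypercall, assuming the KVM_FEATURE_* and
KVM_HC_* definitions from the uapi headers and the kvm_hypercall() helper
added above.

/* Sketch: guest-side probe of a KVM PV feature bit before using it.
 * EAX of CPUID leaf KVM_CPUID_FEATURES advertises the KVM_FEATURE_* bits;
 * with the leaf cleared, as in this test, every probe comes back false.
 */
static bool guest_has_kvm_feature(unsigned int feature)
{
        uint32_t eax = KVM_CPUID_FEATURES, ebx, ecx = 0, edx;

        asm volatile("cpuid"
                     : "+a"(eax), "=b"(ebx), "+c"(ecx), "=d"(edx));

        return eax & (1u << feature);
}

/* Only yield via hypercall when the feature is actually advertised. */
static void maybe_sched_yield(uint64_t dest_apic_id)
{
        if (guest_has_kvm_feature(KVM_FEATURE_PV_SCHED_YIELD))
                kvm_hypercall(KVM_HC_SCHED_YIELD, dest_apic_id, 0, 0, 0);
}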