KVM: selftests: Add TEST_REQUIRE macros to reduce skipping copy+paste
Add TEST_REQUIRE() and __TEST_REQUIRE() to replace the myriad open coded
instances of selftests exiting with KSFT_SKIP after printing an
informational message.  In addition to reducing the amount of boilerplate
code in selftests, the UPPERCASE macro names make it easier to visually
identify a test's requirements.

Convert usage that erroneously uses something other than print_skip()
and/or "exits" with '0' or some other non-KSFT_SKIP value.

Intentionally drop a kvm_vm_free() in aarch64/debug-exceptions.c as part
of the conversion.  All memory and file descriptors are freed on process
exit, so the explicit free is superfluous.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 7ed397d107
parent 3ea9b80965
Changed files under tools/testing/selftests/kvm/:
    aarch64/ …
    access_tracking_perf_test.c
    include/ …
    kvm_binary_stats_test.c
    kvm_create_max_vcpus.c
    lib/ …
    rseq_test.c
    s390x/ …
    steal_time.c
    system_counter_offset_test.c
    x86_64/
        amx_test.c, cr4_cpuid_sync_test.c, debug_regs.c, emulator_error_test.c,
        evmcs_test.c, fix_hypercall_test.c, get_msr_index_features.c,
        hyperv_cpuid.c, hyperv_svm_test.c, kvm_clock_test.c, kvm_pv_test.c,
        mmio_warning_test.c, mmu_role_test.c, platform_info_test.c,
        pmu_event_filter_test.c, set_boot_cpu_id.c, sev_migrate_tests.c,
        sync_regs_test.c, triple_fault_event_test.c, tsc_scaling_sync.c,
        vmx_exception_with_invalid_guest_state.c, vmx_nested_tsc_scaling_test.c,
        vmx_pmu_caps_test.c, vmx_preemption_timer_test.c,
        vmx_set_nested_state_test.c, xen_shinfo_test.c, xen_vmcall_test.c,
        xss_msr_test.c
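Every hunk below applies the same conversion pattern. As a minimal sketch (the capability name here is an illustrative stand-in, not one taken from this commit):

	/* Before: open coded skip */
	if (!kvm_has_cap(KVM_CAP_SOME_FEATURE)) {
		print_skip("KVM_CAP_SOME_FEATURE not supported");
		exit(KSFT_SKIP);
	}

	/* After: the requirement is stated once, in one line */
	TEST_REQUIRE(kvm_has_cap(KVM_CAP_SOME_FEATURE));

	/* Or, when a custom skip message is wanted */
	__TEST_REQUIRE(kvm_has_cap(KVM_CAP_SOME_FEATURE),
		       "KVM_CAP_SOME_FEATURE not supported");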
@@ -375,10 +375,7 @@ static struct kvm_vm *test_vm_create(void)
 	ucall_init(vm, NULL);
 	test_init_timer_irq(vm);
 	gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
-	if (gic_fd < 0) {
-		print_skip("Failed to create vgic-v3");
-		exit(KSFT_SKIP);
-	}
+	__TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3");
 
 	/* Make all the test's cmdline args visible to the guest */
 	sync_global_to_guest(vm, test_args);
@@ -468,10 +465,8 @@ int main(int argc, char *argv[])
 	if (!parse_args(argc, argv))
 		exit(KSFT_SKIP);
 
-	if (test_args.migration_freq_ms && get_nprocs() < 2) {
-		print_skip("At least two physical CPUs needed for vCPU migration");
-		exit(KSFT_SKIP);
-	}
+	__TEST_REQUIRE(!test_args.migration_freq_ms || get_nprocs() >= 2,
+		       "At least two physical CPUs needed for vCPU migration");
 
 	vm = test_vm_create();
 	test_run(vm);
@@ -259,11 +259,8 @@ int main(int argc, char *argv[])
 	vm_init_descriptor_tables(vm);
 	vcpu_init_descriptor_tables(vcpu);
 
-	if (debug_version(vcpu) < 6) {
-		print_skip("Armv8 debug architecture not supported.");
-		kvm_vm_free(vm);
-		exit(KSFT_SKIP);
-	}
+	__TEST_REQUIRE(debug_version(vcpu) >= 6,
+		       "Armv8 debug architecture not supported.");
 
 	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
 				ESR_EC_BRK_INS, guest_sw_bp_handler);
@@ -395,10 +395,12 @@ static void check_supported(struct vcpu_config *c)
 	struct reg_sublist *s;
 
 	for_each_sublist(c, s) {
-		if (s->capability && !kvm_has_cap(s->capability)) {
-			fprintf(stderr, "%s: %s not available, skipping tests\n", config_name(c), s->name);
-			exit(KSFT_SKIP);
-		}
+		if (!s->capability)
+			continue;
+
+		__TEST_REQUIRE(kvm_has_cap(s->capability),
+			       "%s: %s not available, skipping tests\n",
+			       config_name(c), s->name);
 	}
 }
 
@@ -192,10 +192,7 @@ static void host_test_system_suspend(void)
 
 int main(void)
 {
-	if (!kvm_check_cap(KVM_CAP_ARM_SYSTEM_SUSPEND)) {
-		print_skip("KVM_CAP_ARM_SYSTEM_SUSPEND not supported");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(kvm_check_cap(KVM_CAP_ARM_SYSTEM_SUSPEND));
 
 	host_test_cpu_on();
 	host_test_system_suspend();
@@ -82,10 +82,7 @@ int main(void)
 	struct kvm_vm *vm;
 	int ret;
 
-	if (!kvm_has_cap(KVM_CAP_ARM_EL1_32BIT)) {
-		print_skip("KVM_CAP_ARM_EL1_32BIT is not supported");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_EL1_32BIT));
 
 	/* Get the preferred target type and copy that to init1 for later use */
 	vm = vm_create_barebones();
@@ -703,13 +703,9 @@ int main(int ac, char **av)
 	}
 
 	ret = test_kvm_device(KVM_DEV_TYPE_ARM_VGIC_V2);
-	if (!ret) {
-		pr_info("Running GIC_v2 tests.\n");
-		run_tests(KVM_DEV_TYPE_ARM_VGIC_V2);
-		return 0;
-	}
+	__TEST_REQUIRE(!ret, "No GICv2 nor GICv3 support");
 
-	print_skip("No GICv2 nor GICv3 support");
-	exit(KSFT_SKIP);
+	pr_info("Running GIC_v2 tests.\n");
+	run_tests(KVM_DEV_TYPE_ARM_VGIC_V2);
 	return 0;
 }
@@ -768,10 +768,7 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
 
 	gic_fd = vgic_v3_setup(vm, 1, nr_irqs,
 			GICD_BASE_GPA, GICR_BASE_GPA);
-	if (gic_fd < 0) {
-		print_skip("Failed to create vgic-v3, skipping");
-		exit(KSFT_SKIP);
-	}
+	__TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3, skipping");
 
 	vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT,
 		guest_irq_handlers[args.eoi_split][args.level_sensitive]);
@@ -104,10 +104,7 @@ static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva)
 		return 0;
 
 	pfn = entry & PAGEMAP_PFN_MASK;
-	if (!pfn) {
-		print_skip("Looking up PFNs requires CAP_SYS_ADMIN");
-		exit(KSFT_SKIP);
-	}
+	__TEST_REQUIRE(pfn, "Looking up PFNs requires CAP_SYS_ADMIN");
 
 	return pfn;
 }
@@ -380,10 +377,8 @@ int main(int argc, char *argv[])
 	}
 
 	page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
-	if (page_idle_fd < 0) {
-		print_skip("CONFIG_IDLE_PAGE_TRACKING is not enabled");
-		exit(KSFT_SKIP);
-	}
+	__TEST_REQUIRE(page_idle_fd >= 0,
+		       "CONFIG_IDLE_PAGE_TRACKING is not enabled");
 	close(page_idle_fd);
 
 	for_each_guest_mode(run_test, &params);
@@ -34,6 +34,15 @@ static inline int _no_printf(const char *format, ...) { return 0; }
 #endif
 
 void print_skip(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
+#define __TEST_REQUIRE(f, fmt, ...)			\
+do {							\
+	if (!(f)) {					\
+		print_skip(fmt, ##__VA_ARGS__);		\
+		exit(KSFT_SKIP);			\
+	}						\
+} while (0)
+
+#define TEST_REQUIRE(f) __TEST_REQUIRE(f, "Requirement not met: %s", #f)
 
 ssize_t test_write(int fd, const void *buf, size_t count);
 ssize_t test_read(int fd, void *buf, size_t count);
@@ -213,10 +213,7 @@ int main(int argc, char *argv[])
 	}
 
 	/* Check the extension for binary stats */
-	if (!kvm_has_cap(KVM_CAP_BINARY_STATS_FD)) {
-		print_skip("Binary form statistics interface is not supported");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_BINARY_STATS_FD));
 
 	/* Create VMs and VCPUs */
 	vms = malloc(sizeof(vms[0]) * max_vm);
@@ -64,11 +64,9 @@ int main(int argc, char *argv[])
 		rl.rlim_max = nr_fds_wanted;
 
 		int r = setrlimit(RLIMIT_NOFILE, &rl);
-		if (r < 0) {
-			printf("RLIMIT_NOFILE hard limit is too low (%d, wanted %d)\n",
+		__TEST_REQUIRE(r >= 0,
+			       "RLIMIT_NOFILE hard limit is too low (%d, wanted %d)\n",
 			       old_rlim_max, nr_fds_wanted);
-			exit(KSFT_SKIP);
-		}
 	} else {
 		TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!");
 	}
@@ -26,10 +26,7 @@ int open_path_or_exit(const char *path, int flags)
 	int fd;
 
 	fd = open(path, flags);
-	if (fd < 0) {
-		print_skip("%s not available (errno: %d)", path, errno);
-		exit(KSFT_SKIP);
-	}
+	__TEST_REQUIRE(fd >= 0, "%s not available (errno: %d)", path, errno);
 
 	return fd;
 }
@@ -93,10 +90,7 @@ static void vm_open(struct kvm_vm *vm)
 {
 	vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR);
 
-	if (!kvm_has_cap(KVM_CAP_IMMEDIATE_EXIT)) {
-		print_skip("immediate_exit not available");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_IMMEDIATE_EXIT));
 
 	vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, vm->type);
 	TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd));
@@ -609,14 +609,14 @@ void vm_xsave_req_perm(int bit)
 	kvm_fd = open_kvm_dev_path_or_exit();
 	rc = __kvm_ioctl(kvm_fd, KVM_GET_DEVICE_ATTR, &attr);
 	close(kvm_fd);
 
 	if (rc == -1 && (errno == ENXIO || errno == EINVAL))
 		exit(KSFT_SKIP);
 	TEST_ASSERT(rc == 0, "KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) error: %ld", rc);
-	if (!(bitmask & (1ULL << bit)))
-		exit(KSFT_SKIP);
 
-	if (!is_xfd_supported())
-		exit(KSFT_SKIP);
+	TEST_REQUIRE(bitmask & (1ULL << bit));
+
+	TEST_REQUIRE(is_xfd_supported());
 
 	rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit);
@@ -174,10 +174,7 @@ bool nested_svm_supported(void)
 
 void nested_svm_check_supported(void)
 {
-	if (!nested_svm_supported()) {
-		print_skip("nested SVM not enabled");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(nested_svm_supported());
 }
 
 /*
@@ -391,10 +391,7 @@ bool nested_vmx_supported(void)
 
 void nested_vmx_check_supported(void)
 {
-	if (!nested_vmx_supported()) {
-		print_skip("nested VMX not enabled");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(nested_vmx_supported());
 }
 
 static void nested_create_pte(struct kvm_vm *vm,
@@ -171,12 +171,11 @@ static void *migration_worker(void *ign)
 	return NULL;
 }
 
-static int calc_min_max_cpu(void)
+static void calc_min_max_cpu(void)
 {
 	int i, cnt, nproc;
 
-	if (CPU_COUNT(&possible_mask) < 2)
-		return -EINVAL;
+	TEST_REQUIRE(CPU_COUNT(&possible_mask) >= 2);
 
 	/*
 	 * CPU_SET doesn't provide a FOR_EACH helper, get the min/max CPU that
@@ -198,7 +197,8 @@ static int calc_min_max_cpu(void)
 		cnt++;
 	}
 
-	return (cnt < 2) ? -EINVAL : 0;
+	__TEST_REQUIRE(cnt >= 2,
+		       "Only one usable CPU, task migration not possible");
 }
 
 int main(int argc, char *argv[])
@@ -215,10 +215,7 @@ int main(int argc, char *argv[])
 	TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno,
 		    strerror(errno));
 
-	if (calc_min_max_cpu()) {
-		print_skip("Only one usable CPU, task migration not possible");
-		exit(KSFT_SKIP);
-	}
+	calc_min_max_cpu();
 
 	sys_rseq(0);
 
@@ -756,20 +756,17 @@ struct testdef {
 
 int main(int argc, char *argv[])
 {
-	int memop_cap, extension_cap, idx;
+	int extension_cap, idx;
+
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_MEM_OP));
 
 	setbuf(stdout, NULL);	/* Tell stdout not to buffer its content */
 
 	ksft_print_header();
 
-	memop_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP);
-	extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION);
-	if (!memop_cap) {
-		ksft_exit_skip("CAP_S390_MEM_OP not supported.\n");
-	}
-
 	ksft_set_plan(ARRAY_SIZE(testlist));
 
+	extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION);
 	for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
 		if (testlist[idx].extension >= extension_cap) {
 			testlist[idx].test();
@@ -229,14 +229,13 @@ int main(int argc, char *argv[])
 	struct kvm_vm *vm;
 	int idx;
 
+	TEST_REQUIRE(kvm_check_cap(KVM_CAP_SYNC_REGS));
+
 	/* Tell stdout not to buffer its content */
 	setbuf(stdout, NULL);
 
 	ksft_print_header();
 
-	if (!kvm_check_cap(KVM_CAP_SYNC_REGS))
-		ksft_exit_skip("CAP_SYNC_REGS not supported");
-
 	ksft_set_plan(ARRAY_SIZE(testlist));
 
 	/* Create VM */
@@ -271,10 +271,7 @@ int main(int ac, char **av)
 	virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages);
 	ucall_init(vm, NULL);
 
-	if (!is_steal_time_supported(vcpus[0])) {
-		print_skip("steal-time not supported");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(is_steal_time_supported(vcpus[0]));
 
 	/* Run test on each VCPU */
 	for (i = 0; i < NR_VCPUS; ++i) {
@@ -28,11 +28,9 @@ static struct test_case test_cases[] = {
 
 static void check_preconditions(struct kvm_vcpu *vcpu)
 {
-	if (!__vcpu_has_device_attr(vcpu, KVM_VCPU_TSC_CTRL, KVM_VCPU_TSC_OFFSET))
-		return;
-
-	print_skip("KVM_VCPU_TSC_OFFSET not supported; skipping test");
-	exit(KSFT_SKIP);
+	__TEST_REQUIRE(!__vcpu_has_device_attr(vcpu, KVM_VCPU_TSC_CTRL,
+					       KVM_VCPU_TSC_OFFSET),
+		       "KVM_VCPU_TSC_OFFSET not supported; skipping test");
 }
 
 static void setup_system_counter(struct kvm_vcpu *vcpu, struct test_case *test)
@@ -317,7 +317,6 @@ int main(int argc, char *argv[])
 {
 	struct kvm_cpuid_entry2 *entry;
 	struct kvm_regs regs1, regs2;
-	bool amx_supported = false;
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
 	struct kvm_run *run;
@@ -334,21 +333,15 @@ int main(int argc, char *argv[])
 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 
 	entry = kvm_get_supported_cpuid_entry(1);
-	if (!(entry->ecx & X86_FEATURE_XSAVE)) {
-		print_skip("XSAVE feature not supported");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(entry->ecx & X86_FEATURE_XSAVE);
 
-	if (kvm_get_cpuid_max_basic() >= 0xd) {
-		entry = kvm_get_supported_cpuid_index(0xd, 0);
-		amx_supported = entry && !!(entry->eax & XFEATURE_MASK_XTILE);
-		if (!amx_supported) {
-			print_skip("AMX is not supported by the vCPU (eax=0x%x)", entry->eax);
-			exit(KSFT_SKIP);
-		}
-		/* Get xsave/restore max size */
-		xsave_restore_size = entry->ecx;
-	}
+	TEST_REQUIRE(kvm_get_cpuid_max_basic() >= 0xd);
+
+	entry = kvm_get_supported_cpuid_index(0xd, 0);
+	TEST_REQUIRE(entry->eax & XFEATURE_MASK_XTILE);
+
+	/* Get xsave/restore max size */
+	xsave_restore_size = entry->ecx;
 
 	run = vcpu->run;
 	vcpu_regs_get(vcpu, &regs1);
@@ -70,10 +70,7 @@ int main(int argc, char *argv[])
 	struct ucall uc;
 
 	entry = kvm_get_supported_cpuid_entry(1);
-	if (!(entry->ecx & X86_FEATURE_XSAVE)) {
-		print_skip("XSAVE feature not supported");
-		return 0;
-	}
+	TEST_REQUIRE(entry->ecx & X86_FEATURE_XSAVE);
 
 	/* Tell stdout not to buffer its content */
 	setbuf(stdout, NULL);
@@ -95,10 +95,7 @@ int main(void)
 		1,		/* cli */
 	};
 
-	if (!kvm_has_cap(KVM_CAP_SET_GUEST_DEBUG)) {
-		print_skip("KVM_CAP_SET_GUEST_DEBUG not supported");
-		return 0;
-	}
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_SET_GUEST_DEBUG));
 
 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 	run = vcpu->run;
@@ -162,10 +162,7 @@ int main(int argc, char *argv[])
 	/* Tell stdout not to buffer its content */
 	setbuf(stdout, NULL);
 
-	if (!kvm_has_cap(KVM_CAP_SMALLER_MAXPHYADDR)) {
-		printf("module parameter 'allow_smaller_maxphyaddr' is not set. Skipping test.\n");
-		return 0;
-	}
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_SMALLER_MAXPHYADDR));
 
 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 
@@ -208,12 +208,9 @@ int main(int argc, char *argv[])
 
 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 
-	if (!nested_vmx_supported() ||
-	    !kvm_has_cap(KVM_CAP_NESTED_STATE) ||
-	    !kvm_has_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
-		print_skip("Enlightened VMCS is unsupported");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(nested_vmx_supported());
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS));
 
 	vcpu_set_hv_cpuid(vcpu);
 	vcpu_enable_evmcs(vcpu);
@@ -156,10 +156,7 @@ static void test_fix_hypercall_disabled(void)
 
 int main(void)
 {
-	if (!(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN)) {
-		print_skip("KVM_X86_QUIRK_HYPERCALL_INSN not supported");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN);
 
 	test_fix_hypercall();
 	test_fix_hypercall_disabled();
@@ -25,10 +25,7 @@ int main(int argc, char *argv[])
 	 * will cover the "regular" list of MSRs, the coverage here is purely
 	 * opportunistic and not interesting on its own.
 	 */
-	if (!kvm_check_cap(KVM_CAP_GET_MSR_FEATURES)) {
-		print_skip("KVM_CAP_GET_MSR_FEATURES not supported");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_GET_MSR_FEATURES));
 
 	(void)kvm_get_msr_index_list();
 
@@ -137,10 +137,7 @@ int main(int argc, char *argv[])
 	/* Tell stdout not to buffer its content */
 	setbuf(stdout, NULL);
 
-	if (!kvm_has_cap(KVM_CAP_HYPERV_CPUID)) {
-		print_skip("KVM_CAP_HYPERV_CPUID not supported");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_CPUID));
 
 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 
@@ -127,10 +127,8 @@ int main(int argc, char *argv[])
 	struct ucall uc;
 	int stage;
 
-	if (!nested_svm_supported()) {
-		print_skip("Nested SVM not supported");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(nested_svm_supported());
+
 	/* Create VM */
 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 	vcpu_set_hv_cpuid(vcpu);
@@ -181,11 +181,7 @@ int main(void)
 	int flags;
 
 	flags = kvm_check_cap(KVM_CAP_ADJUST_CLOCK);
-	if (!(flags & KVM_CLOCK_REALTIME)) {
-		print_skip("KVM_CLOCK_REALTIME not supported; flags: %x",
-			   flags);
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(flags & KVM_CLOCK_REALTIME);
 
 	check_clocksource();
 
@@ -204,10 +204,7 @@ int main(void)
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
 
-	if (!kvm_has_cap(KVM_CAP_ENFORCE_PV_FEATURE_CPUID)) {
-		print_skip("KVM_CAP_ENFORCE_PV_FEATURE_CPUID not supported");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ENFORCE_PV_FEATURE_CPUID));
 
 	vm = vm_create_with_one_vcpu(&vcpu, guest_main);
 
@@ -93,15 +93,9 @@ int main(void)
 {
 	int warnings_before, warnings_after;
 
-	if (!is_intel_cpu()) {
-		print_skip("Must be run on an Intel CPU");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(is_intel_cpu());
 
-	if (vm_is_unrestricted_guest(NULL)) {
-		print_skip("Unrestricted guest must be disabled");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(!vm_is_unrestricted_guest(NULL));
 
 	warnings_before = get_warnings_count();
 
@@ -117,16 +117,10 @@ int main(int argc, char *argv[])
 		}
 	}
 
-	if (!do_gbpages && !do_maxphyaddr) {
-		print_skip("No sub-tests selected");
-		return 0;
-	}
+	__TEST_REQUIRE(do_gbpages || do_maxphyaddr, "No sub-tests selected");
 
 	entry = kvm_get_supported_cpuid_entry(0x80000001);
-	if (!(entry->edx & CPUID_GBPAGES)) {
-		print_skip("1gb hugepages not supported");
-		return 0;
-	}
+	TEST_REQUIRE(entry->edx & CPUID_GBPAGES);
 
 	if (do_gbpages) {
 		pr_info("Test MMIO after toggling CPUID.GBPAGES\n\n");
@@ -70,17 +70,12 @@ int main(int argc, char *argv[])
 {
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
-	int rv;
 	uint64_t msr_platform_info;
 
 	/* Tell stdout not to buffer its content */
 	setbuf(stdout, NULL);
 
-	rv = kvm_check_cap(KVM_CAP_MSR_PLATFORM_INFO);
-	if (!rv) {
-		print_skip("KVM_CAP_MSR_PLATFORM_INFO not supported");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_MSR_PLATFORM_INFO));
 
 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 
@@ -443,39 +443,24 @@ static bool use_amd_pmu(void)
 
 int main(int argc, char *argv[])
 {
-	void (*guest_code)(void) = NULL;
+	void (*guest_code)(void);
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
-	int r;
 
 	/* Tell stdout not to buffer its content */
 	setbuf(stdout, NULL);
 
-	r = kvm_check_cap(KVM_CAP_PMU_EVENT_FILTER);
-	if (!r) {
-		print_skip("KVM_CAP_PMU_EVENT_FILTER not supported");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(kvm_check_cap(KVM_CAP_PMU_EVENT_FILTER));
 
-	if (use_intel_pmu())
-		guest_code = intel_guest_code;
-	else if (use_amd_pmu())
-		guest_code = amd_guest_code;
-
-	if (!guest_code) {
-		print_skip("Don't know how to test this guest PMU");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(use_intel_pmu() || use_amd_pmu());
+	guest_code = use_intel_pmu() ? intel_guest_code : amd_guest_code;
 
 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 
 	vm_init_descriptor_tables(vm);
 	vcpu_init_descriptor_tables(vcpu);
 
-	if (!sanity_check_pmu(vcpu)) {
-		print_skip("Guest PMU is not functional");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(sanity_check_pmu(vcpu));
 
 	if (use_amd_pmu())
 		test_amd_deny_list(vcpu);
@@ -123,10 +123,7 @@ static void check_set_bsp_busy(void)
 
 int main(int argc, char *argv[])
 {
-	if (!kvm_has_cap(KVM_CAP_SET_BOOT_CPU_ID)) {
-		print_skip("set_boot_cpu_id not available");
-		return 0;
-	}
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_SET_BOOT_CPU_ID));
 
 	run_vm_bsp(0);
 	run_vm_bsp(1);
@@ -400,22 +400,15 @@ int main(int argc, char *argv[])
 {
 	struct kvm_cpuid_entry2 *cpuid;
 
-	if (!kvm_has_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM) &&
-	    !kvm_has_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) {
-		print_skip("Capabilities not available");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM));
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM));
 
 	cpuid = kvm_get_supported_cpuid_entry(0x80000000);
-	if (cpuid->eax < 0x8000001f) {
-		print_skip("AMD memory encryption not available");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(cpuid->eax >= 0x8000001f);
+
 	cpuid = kvm_get_supported_cpuid_entry(0x8000001f);
-	if (!(cpuid->eax & X86_FEATURE_SEV)) {
-		print_skip("AMD SEV not available");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(cpuid->eax & X86_FEATURE_SEV);
+
 	have_sev_es = !!(cpuid->eax & X86_FEATURE_SEV_ES);
 
 	if (kvm_check_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM)) {
@@ -94,14 +94,8 @@ int main(int argc, char *argv[])
 	setbuf(stdout, NULL);
 
 	cap = kvm_check_cap(KVM_CAP_SYNC_REGS);
-	if ((cap & TEST_SYNC_FIELDS) != TEST_SYNC_FIELDS) {
-		print_skip("KVM_CAP_SYNC_REGS not supported");
-		exit(KSFT_SKIP);
-	}
-	if ((cap & INVALID_SYNC_FIELD) != 0) {
-		print_skip("The \"invalid\" field is not invalid");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE((cap & TEST_SYNC_FIELDS) == TEST_SYNC_FIELDS);
+	TEST_REQUIRE(!(cap & INVALID_SYNC_FIELD));
 
 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 
@@ -46,15 +46,9 @@ int main(void)
 	vm_vaddr_t vmx_pages_gva;
 	struct ucall uc;
 
-	if (!nested_vmx_supported()) {
-		print_skip("Nested VMX not supported");
-		exit(KSFT_SKIP);
-	}
+	nested_vmx_check_supported();
 
-	if (!kvm_has_cap(KVM_CAP_X86_TRIPLE_FAULT_EVENT)) {
-		print_skip("KVM_CAP_X86_TRIPLE_FAULT_EVENT not supported");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_TRIPLE_FAULT_EVENT));
 
 	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
 	vm_enable_cap(vm, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 1);
@@ -93,10 +93,7 @@ static void *run_vcpu(void *_cpu_nr)
 
 int main(int argc, char *argv[])
 {
-	if (!kvm_has_cap(KVM_CAP_VM_TSC_CONTROL)) {
-		print_skip("KVM_CAP_VM_TSC_CONTROL not available");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_TSC_CONTROL));
 
 	vm = vm_create(NR_TEST_VCPUS);
 	vm_ioctl(vm, KVM_SET_TSC_KHZ, (void *) TEST_TSC_KHZ);
@@ -111,10 +111,8 @@ int main(int argc, char *argv[])
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
 
-	if (!is_intel_cpu() || vm_is_unrestricted_guest(NULL)) {
-		print_skip("Must be run with kvm_intel.unrestricted_guest=0");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(is_intel_cpu());
+	TEST_REQUIRE(!vm_is_unrestricted_guest(NULL));
 
 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 	get_set_sigalrm_vcpu(vcpu);
@@ -116,14 +116,6 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
 	GUEST_DONE();
 }
 
-static void tsc_scaling_check_supported(void)
-{
-	if (!kvm_has_cap(KVM_CAP_TSC_CONTROL)) {
-		print_skip("TSC scaling not supported by the HW");
-		exit(KSFT_SKIP);
-	}
-}
-
 static void stable_tsc_check_supported(void)
 {
 	FILE *fp;
@@ -159,7 +151,7 @@ int main(int argc, char *argv[])
 	uint64_t l2_tsc_freq = 0;
 
 	nested_vmx_check_supported();
-	tsc_scaling_check_supported();
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_TSC_CONTROL));
 	stable_tsc_check_supported();
 
 	/*
@@ -57,7 +57,6 @@ int main(int argc, char *argv[])
 	struct kvm_cpuid2 *cpuid;
 	struct kvm_cpuid_entry2 *entry_1_0;
 	struct kvm_cpuid_entry2 *entry_a_0;
-	bool pdcm_supported = false;
 	struct kvm_vm *vm;
 	struct kvm_vcpu *vcpu;
 	int ret;
@@ -71,20 +70,14 @@ int main(int argc, char *argv[])
 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 	cpuid = kvm_get_supported_cpuid();
 
-	if (kvm_get_cpuid_max_basic() >= 0xa) {
-		entry_1_0 = kvm_get_supported_cpuid_index(1, 0);
-		entry_a_0 = kvm_get_supported_cpuid_index(0xa, 0);
-		pdcm_supported = entry_1_0 && !!(entry_1_0->ecx & X86_FEATURE_PDCM);
-		eax.full = entry_a_0->eax;
-	}
-	if (!pdcm_supported) {
-		print_skip("MSR_IA32_PERF_CAPABILITIES is not supported by the vCPU");
-		exit(KSFT_SKIP);
-	}
-	if (!eax.split.version_id) {
-		print_skip("PMU is not supported by the vCPU");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(kvm_get_cpuid_max_basic() >= 0xa);
+
+	entry_1_0 = kvm_get_supported_cpuid_index(1, 0);
+	entry_a_0 = kvm_get_supported_cpuid_index(0xa, 0);
+	TEST_REQUIRE(entry_1_0->ecx & X86_FEATURE_PDCM);
+
+	eax.full = entry_a_0->eax;
+	__TEST_REQUIRE(eax.split.version_id, "PMU is not supported by the vCPU");
 
 	/* testcase 1, set capabilities when we have PDCM bit */
 	vcpu_set_cpuid(vcpu, cpuid);
@@ -169,10 +169,7 @@ int main(int argc, char *argv[])
 	 */
 	nested_vmx_check_supported();
 
-	if (!kvm_has_cap(KVM_CAP_NESTED_STATE)) {
-		print_skip("KVM_CAP_NESTED_STATE not supported");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
 
 	/* Create VM */
 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
@@ -267,10 +267,7 @@ int main(int argc, char *argv[])
 
 	have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS);
 
-	if (!kvm_has_cap(KVM_CAP_NESTED_STATE)) {
-		print_skip("KVM_CAP_NESTED_STATE not available");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
 
 	/*
 	 * AMD currently does not implement set_nested_state, so for now we
@@ -362,10 +362,7 @@ int main(int argc, char *argv[])
 				!strncmp(argv[1], "--verbose", 10));
 
 	int xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM);
-	if (!(xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO) ) {
-		print_skip("KVM_XEN_HVM_CONFIG_SHARED_INFO not available");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO);
 
 	bool do_runstate_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_RUNSTATE);
 	bool do_eventfd_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL);
@@ -80,14 +80,12 @@ static void guest_code(void)
 
 int main(int argc, char *argv[])
 {
+	unsigned int xen_caps;
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
 
-	if (!(kvm_check_cap(KVM_CAP_XEN_HVM) &
-	      KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) ) {
-		print_skip("KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL not available");
-		exit(KSFT_SKIP);
-	}
+	xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM);
+	TEST_REQUIRE(xen_caps & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
 
 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 	vcpu_set_hv_cpuid(vcpu);
@@ -19,7 +19,6 @@
 int main(int argc, char *argv[])
 {
 	struct kvm_cpuid_entry2 *entry;
-	bool xss_supported = false;
 	bool xss_in_msr_list;
 	struct kvm_vm *vm;
 	struct kvm_vcpu *vcpu;
@@ -29,14 +28,10 @@ int main(int argc, char *argv[])
 	/* Create VM */
 	vm = vm_create_with_one_vcpu(&vcpu, NULL);
 
-	if (kvm_get_cpuid_max_basic() >= 0xd) {
-		entry = kvm_get_supported_cpuid_index(0xd, 1);
-		xss_supported = entry && !!(entry->eax & X86_FEATURE_XSAVES);
-	}
-	if (!xss_supported) {
-		print_skip("IA32_XSS is not supported by the vCPU");
-		exit(KSFT_SKIP);
-	}
+	TEST_REQUIRE(kvm_get_cpuid_max_basic() >= 0xd);
+
+	entry = kvm_get_supported_cpuid_index(0xd, 1);
+	TEST_REQUIRE(entry->eax & X86_FEATURE_XSAVES);
 
 	xss_val = vcpu_get_msr(vcpu, MSR_IA32_XSS);
 	TEST_ASSERT(xss_val == 0,