kvm: selftests: Add support for KVM_CAP_XSAVE2
When KVM_CAP_XSAVE2 is supported, userspace is expected to allocate the
buffer for KVM_GET_XSAVE2 and KVM_SET_XSAVE using the size returned by
KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2).

Signed-off-by: Wei Wang <wei.w.wang@intel.com>
Signed-off-by: Guang Zeng <guang.zeng@intel.com>
Signed-off-by: Jing Liu <jing2.liu@intel.com>
Signed-off-by: Yang Zhong <yang.zhong@intel.com>
Message-Id: <20220105123532.12586-20-yang.zhong@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent be50b2065d
commit 415a3c33e8
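For orientation, a minimal userspace sketch of the flow the commit message describes: query the extension on the VM file descriptor, size the buffer from the return value, then read the vCPU state. This is an illustration only, not part of the patch; vm_fd and vcpu_fd are assumed to come from the usual KVM_CREATE_VM/KVM_CREATE_VCPU sequence, and error handling is omitted.

	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static struct kvm_xsave *read_xsave(int vm_fd, int vcpu_fd)
	{
		struct kvm_xsave *xsave;
		int size;

		/* On the VM fd: returns the required size (>= 4096), or 0 if
		 * KVM_CAP_XSAVE2 is absent. */
		size = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XSAVE2);
		if (size < (int)sizeof(struct kvm_xsave))
			size = sizeof(struct kvm_xsave);

		xsave = malloc(size);
		if (!xsave)
			return NULL;

		/* KVM_GET_XSAVE2 fills up to 'size' bytes; fall back to the
		 * legacy ioctl on kernels without KVM_CAP_XSAVE2. */
		if (size > (int)sizeof(struct kvm_xsave))
			ioctl(vcpu_fd, KVM_GET_XSAVE2, xsave);
		else
			ioctl(vcpu_fd, KVM_GET_XSAVE, xsave);

		return xsave;
	}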
@@ -373,9 +373,23 @@ struct kvm_debugregs {
 	__u64 reserved[9];
 };
 
-/* for KVM_CAP_XSAVE */
+/* for KVM_CAP_XSAVE and KVM_CAP_XSAVE2 */
 struct kvm_xsave {
+	/*
+	 * KVM_GET_XSAVE2 and KVM_SET_XSAVE write and read as many bytes
+	 * as are returned by KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2)
+	 * respectively, when invoked on the vm file descriptor.
+	 *
+	 * The size value returned by KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2)
+	 * will always be at least 4096. Currently, it is only greater
+	 * than 4096 if a dynamic feature has been enabled with
+	 * ``arch_prctl()``, but this may change in the future.
+	 *
+	 * The offsets of the state save areas in struct kvm_xsave follow
+	 * the contents of CPUID leaf 0xD on the host.
+	 */
 	__u32 region[1024];
+	__u32 extra[0];
 };
 
 #define KVM_MAX_XCRS	16
@@ -1131,6 +1131,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204
 #define KVM_CAP_ARM_MTE 205
 #define KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM 206
+#define KVM_CAP_XSAVE2 207
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1551,6 +1552,8 @@ struct kvm_s390_ucas_mapping {
 /* Available with KVM_CAP_XSAVE */
 #define KVM_GET_XSAVE		_IOR(KVMIO, 0xa4, struct kvm_xsave)
 #define KVM_SET_XSAVE		_IOW(KVMIO, 0xa5, struct kvm_xsave)
+/* Available with KVM_CAP_XSAVE2 */
+#define KVM_GET_XSAVE2		_IOR(KVMIO, 0xcf, struct kvm_xsave)
 /* Available with KVM_CAP_XCRS */
 #define KVM_GET_XCRS		_IOR(KVMIO, 0xa6, struct kvm_xcrs)
 #define KVM_SET_XCRS		_IOW(KVMIO, 0xa7, struct kvm_xcrs)
@@ -103,6 +103,7 @@ extern const struct vm_guest_mode_params vm_guest_mode_params[];
 int open_path_or_exit(const char *path, int flags);
 int open_kvm_dev_path_or_exit(void);
 int kvm_check_cap(long cap);
+int vm_check_cap(struct kvm_vm *vm, long cap);
 int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
 int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
 		    struct kvm_enable_cap *cap);
@@ -344,6 +345,7 @@ struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
  * guest_code - The vCPU's entry point
  */
 void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code);
+void vm_xsave_req_perm(void);
 
 bool vm_is_unrestricted_guest(struct kvm_vm *vm);
 
@@ -10,8 +10,10 @@
 
 #include <assert.h>
 #include <stdint.h>
+#include <syscall.h>
 
 #include <asm/msr-index.h>
+#include <asm/prctl.h>
 
 #include "../kvm_util.h"
 
@@ -352,6 +354,7 @@ struct kvm_x86_state;
 struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid);
 void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid,
 		     struct kvm_x86_state *state);
+void kvm_x86_state_cleanup(struct kvm_x86_state *state);
 
 struct kvm_msr_list *kvm_get_msr_index_list(void);
 uint64_t kvm_get_feature_msr(uint64_t msr_index);
@@ -443,4 +446,11 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 /* VMX_EPT_VPID_CAP bits */
 #define VMX_EPT_VPID_CAP_AD_BITS	(1ULL << 21)
 
+#define XSTATE_XTILE_CFG_BIT		17
+#define XSTATE_XTILE_DATA_BIT		18
+
+#define XSTATE_XTILE_CFG_MASK		(1ULL << XSTATE_XTILE_CFG_BIT)
+#define XSTATE_XTILE_DATA_MASK		(1ULL << XSTATE_XTILE_DATA_BIT)
+#define XFEATURE_XTILE_MASK		(XSTATE_XTILE_CFG_MASK | \
+					XSTATE_XTILE_DATA_MASK)
 #endif /* SELFTEST_KVM_PROCESSOR_H */
@@ -85,6 +85,33 @@ int kvm_check_cap(long cap)
 	return ret;
 }
 
+/* VM Check Capability
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *   cap - Capability
+ *
+ * Output Args: None
+ *
+ * Return:
+ *   On success, the value corresponding to the capability (KVM_CAP_*)
+ *   specified by the value of cap.  On failure a TEST_ASSERT failure
+ *   is produced.
+ *
+ * Looks up and returns the value corresponding to the capability
+ * (KVM_CAP_*) given by cap.
+ */
+int vm_check_cap(struct kvm_vm *vm, long cap)
+{
+	int ret;
+
+	ret = ioctl(vm->fd, KVM_CHECK_EXTENSION, cap);
+	TEST_ASSERT(ret >= 0, "KVM_CHECK_EXTENSION VM IOCTL failed,\n"
+		"  rc: %i errno: %i", ret, errno);
+
+	return ret;
+}
+
 /* VM Enable Capability
  *
  * Input Args:
@@ -366,6 +393,11 @@ struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
 	struct kvm_vm *vm;
 	int i;
 
+	/*
+	 * Permission needs to be requested before KVM_SET_CPUID2.
+	 */
+	vm_xsave_req_perm();
+
 	/* Force slot0 memory size not small than DEFAULT_GUEST_PHY_PAGES */
 	if (slot0_mem_pages < DEFAULT_GUEST_PHY_PAGES)
 		slot0_mem_pages = DEFAULT_GUEST_PHY_PAGES;
@@ -650,6 +650,45 @@ static void vcpu_setup(struct kvm_vm *vm, int vcpuid)
 	vcpu_sregs_set(vm, vcpuid, &sregs);
 }
 
+#define CPUID_XFD_BIT (1 << 4)
+static bool is_xfd_supported(void)
+{
+	int eax, ebx, ecx, edx;
+	const int leaf = 0xd, subleaf = 0x1;
+
+	__asm__ __volatile__(
+		"cpuid"
+		: /* output */ "=a"(eax), "=b"(ebx),
+		  "=c"(ecx), "=d"(edx)
+		: /* input */ "0"(leaf), "2"(subleaf));
+
+	return !!(eax & CPUID_XFD_BIT);
+}
+
+void vm_xsave_req_perm(void)
+{
+	unsigned long bitmask;
+	long rc;
+
+	if (!is_xfd_supported())
+		return;
+
+	rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM,
+		     XSTATE_XTILE_DATA_BIT);
+	/*
+	 * Older kernels (<5.15) don't support ARCH_REQ_XCOMP_GUEST_PERM;
+	 * in that case the request fails and we simply return.
+	 */
+	if (rc)
+		return;
+
+	rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM, &bitmask);
+	TEST_ASSERT(rc == 0, "prctl(ARCH_GET_XCOMP_GUEST_PERM) error: %ld", rc);
+	TEST_ASSERT(bitmask & XFEATURE_XTILE_MASK,
+		    "prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure bitmask=0x%lx",
+		    bitmask);
+}
+
 void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
 {
 	struct kvm_mp_state mp_state;
@@ -1018,10 +1057,10 @@ void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
 }
 
 struct kvm_x86_state {
+	struct kvm_xsave *xsave;
 	struct kvm_vcpu_events events;
 	struct kvm_mp_state mp_state;
 	struct kvm_regs regs;
-	struct kvm_xsave xsave;
 	struct kvm_xcrs xcrs;
 	struct kvm_sregs sregs;
 	struct kvm_debugregs debugregs;
@@ -1069,6 +1108,22 @@ struct kvm_msr_list *kvm_get_msr_index_list(void)
 	return list;
 }
 
+static int vcpu_save_xsave_state(struct kvm_vm *vm, struct vcpu *vcpu,
+				 struct kvm_x86_state *state)
+{
+	int size;
+
+	size = vm_check_cap(vm, KVM_CAP_XSAVE2);
+	if (!size)
+		size = sizeof(struct kvm_xsave);
+
+	state->xsave = malloc(size);
+	if (size == sizeof(struct kvm_xsave))
+		return ioctl(vcpu->fd, KVM_GET_XSAVE, state->xsave);
+	else
+		return ioctl(vcpu->fd, KVM_GET_XSAVE2, state->xsave);
+}
+
 struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
 {
 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
@@ -1112,7 +1167,7 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
 	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_REGS, r: %i",
 		r);
 
-	r = ioctl(vcpu->fd, KVM_GET_XSAVE, &state->xsave);
+	r = vcpu_save_xsave_state(vm, vcpu, state);
 	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i",
 		r);
 
@@ -1157,7 +1212,7 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
 	int r;
 
-	r = ioctl(vcpu->fd, KVM_SET_XSAVE, &state->xsave);
+	r = ioctl(vcpu->fd, KVM_SET_XSAVE, state->xsave);
 	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
 		r);
 
@@ -1198,6 +1253,12 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
 	}
 }
 
+void kvm_x86_state_cleanup(struct kvm_x86_state *state)
+{
+	free(state->xsave);
+	free(state);
+}
+
 bool is_intel_cpu(void)
 {
 	int eax, ebx, ecx, edx;
@@ -129,7 +129,7 @@ static void save_restore_vm(struct kvm_vm *vm)
 	vcpu_set_hv_cpuid(vm, VCPU_ID);
 	vcpu_enable_evmcs(vm, VCPU_ID);
 	vcpu_load_state(vm, VCPU_ID, state);
-	free(state);
+	kvm_x86_state_cleanup(state);
 
 	memset(&regs2, 0, sizeof(regs2));
 	vcpu_regs_get(vm, VCPU_ID, &regs2);
@@ -212,7 +212,7 @@ int main(int argc, char *argv[])
 		vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
 		vcpu_load_state(vm, VCPU_ID, state);
 		run = vcpu_state(vm, VCPU_ID);
-		free(state);
+		kvm_x86_state_cleanup(state);
 	}
 
 done:
@@ -218,7 +218,7 @@ int main(int argc, char *argv[])
 		vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
 		vcpu_load_state(vm, VCPU_ID, state);
 		run = vcpu_state(vm, VCPU_ID);
-		free(state);
+		kvm_x86_state_cleanup(state);
 
 		memset(&regs2, 0, sizeof(regs2));
 		vcpu_regs_get(vm, VCPU_ID, &regs2);
@@ -244,7 +244,7 @@ int main(int argc, char *argv[])
 		vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
 		vcpu_load_state(vm, VCPU_ID, state);
 		run = vcpu_state(vm, VCPU_ID);
-		free(state);
+		kvm_x86_state_cleanup(state);
 
 		memset(&regs2, 0, sizeof(regs2));
 		vcpu_regs_get(vm, VCPU_ID, &regs2);
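For reference, the save/restore pattern the tests above converge on looks roughly like the sketch below. This is an illustration, not code from the patch; it assumes vm and VCPU_ID are set up as in the selftests, and that kvm_x86_state_cleanup() frees both the dynamically sized xsave buffer and the state struct, as added by this commit.

	struct kvm_x86_state *state;

	state = vcpu_save_state(vm, VCPU_ID);	/* allocates state->xsave via KVM_GET_XSAVE(2) */
	/* ... tear down and recreate the VM/vCPU as the individual tests do ... */
	vcpu_load_state(vm, VCPU_ID, state);	/* KVM_SET_XSAVE with the same buffer */
	kvm_x86_state_cleanup(state);		/* frees state->xsave and state itself */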