KVM: x86: generalize guest_cpuid_has_ helpers
parent c6bd18011f
commit d6321d4933

This patch turns guest_cpuid_has_XYZ(cpuid) into guest_cpuid_has(cpuid,
X86_FEATURE_XYZ), which gets rid of many very similar helpers.

Given an X86_FEATURE_*, we can tell which CPUID leaf and register it
belongs to, but this information isn't available in common code, so we
recreate the mapping for KVM.

Add some BUILD_BUG_ONs to make sure the mapping is only used with
compile-time-constant features and stays in sync with the reverse_cpuid
table.

Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
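The conversion works because of how the kernel encodes its feature
constants: every X86_FEATURE_* is defined as word * 32 + bit, so dividing
by 32 recovers the feature word, which indexes the new reverse_cpuid table
naming the CPUID leaf, subleaf, and output register, while bit % 32 picks
the bit inside that register. The sketch below is a minimal standalone
illustration of that decomposition (plain userspace C, not kernel code;
the one-entry table mirrors the patch but is trimmed for the example):

    /* Standalone sketch; build with: cc -O2 -o sketch sketch.c */
    #include <stdio.h>

    enum { CPUID_1_EDX = 0 };                         /* feature word 0 */
    #define X86_FEATURE_MTRR (CPUID_1_EDX * 32 + 12)  /* CPUID.01H:EDX bit 12 */

    struct cpuid_reg {
            unsigned int function;  /* CPUID leaf (input EAX) */
            unsigned int index;     /* subleaf (input ECX) */
            const char *reg;        /* output register holding the bit */
    };

    static const struct cpuid_reg reverse_cpuid[] = {
            [CPUID_1_EDX] = { 1, 0, "EDX" },
    };

    int main(void)
    {
            unsigned int word = X86_FEATURE_MTRR / 32;  /* table slot */
            unsigned int bit  = X86_FEATURE_MTRR % 32;  /* bit in register */
            const struct cpuid_reg *r = &reverse_cpuid[word];

            printf("X86_FEATURE_MTRR -> CPUID 0x%x.%u, %s bit %u\n",
                   r->function, r->index, r->reg, bit);
            return 0;
    }

Run, it prints that X86_FEATURE_MTRR lives in CPUID leaf 1, subleaf 0,
EDX bit 12 -- the same slot guest_cpuid_has() consults after this patch.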
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -3,6 +3,7 @@
 
 #include "x86.h"
 #include <asm/cpu.h>
+#include <asm/processor.h>
 
 int kvm_update_cpuid(struct kvm_vcpu *vcpu);
 bool kvm_mpx_supported(void);
@@ -29,95 +30,78 @@ static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
         return vcpu->arch.maxphyaddr;
 }
 
-static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
-{
-        struct kvm_cpuid_entry2 *best;
-
-        if (!static_cpu_has(X86_FEATURE_XSAVE))
+struct cpuid_reg {
+        u32 function;
+        u32 index;
+        int reg;
+};
+
+static const struct cpuid_reg reverse_cpuid[] = {
+        [CPUID_1_EDX]         = {         1, 0, CPUID_EDX},
+        [CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
+        [CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
+        [CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
+        [CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
+        [CPUID_8000_0001_ECX] = {0xc0000001, 0, CPUID_ECX},
+        [CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
+        [CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
+        [CPUID_F_0_EDX]       = {       0xf, 0, CPUID_EDX},
+        [CPUID_F_1_EDX]       = {       0xf, 1, CPUID_EDX},
+        [CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
+        [CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
+        [CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
+        [CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
+        [CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
+};
+
+static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
+{
+        unsigned x86_leaf = x86_feature / 32;
+
+        BUILD_BUG_ON(!__builtin_constant_p(x86_leaf));
+        BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
+        BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
+
+        return reverse_cpuid[x86_leaf];
+}
+
+static __always_inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsigned x86_feature)
+{
+        struct kvm_cpuid_entry2 *entry;
+        const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
+
+        entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
+        if (!entry)
+                return NULL;
+
+        switch (cpuid.reg) {
+        case CPUID_EAX:
+                return &entry->eax;
+        case CPUID_EBX:
+                return &entry->ebx;
+        case CPUID_ECX:
+                return &entry->ecx;
+        case CPUID_EDX:
+                return &entry->edx;
+        default:
+                BUILD_BUG();
+                return NULL;
+        }
+}
+
+static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_feature)
+{
+        int *reg;
+
+        if (x86_feature == X86_FEATURE_XSAVE &&
+            !static_cpu_has(X86_FEATURE_XSAVE))
                 return false;
 
-        best = kvm_find_cpuid_entry(vcpu, 1, 0);
-        return best && (best->ecx & bit(X86_FEATURE_XSAVE));
-}
-
-static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
-{
-        struct kvm_cpuid_entry2 *best;
-
-        best = kvm_find_cpuid_entry(vcpu, 1, 0);
-        return best && (best->edx & bit(X86_FEATURE_MTRR));
-}
-
-static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
-{
-        struct kvm_cpuid_entry2 *best;
-
-        best = kvm_find_cpuid_entry(vcpu, 7, 0);
-        return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
-}
-
-static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
-{
-        struct kvm_cpuid_entry2 *best;
-
-        best = kvm_find_cpuid_entry(vcpu, 7, 0);
-        return best && (best->ebx & bit(X86_FEATURE_SMEP));
-}
-
-static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
-{
-        struct kvm_cpuid_entry2 *best;
-
-        best = kvm_find_cpuid_entry(vcpu, 7, 0);
-        return best && (best->ebx & bit(X86_FEATURE_SMAP));
-}
-
-static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
-{
-        struct kvm_cpuid_entry2 *best;
-
-        best = kvm_find_cpuid_entry(vcpu, 7, 0);
-        return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
-}
-
-static inline bool guest_cpuid_has_pku(struct kvm_vcpu *vcpu)
-{
-        struct kvm_cpuid_entry2 *best;
-
-        best = kvm_find_cpuid_entry(vcpu, 7, 0);
-        return best && (best->ecx & bit(X86_FEATURE_PKU));
-}
-
-static inline bool guest_cpuid_has_longmode(struct kvm_vcpu *vcpu)
-{
-        struct kvm_cpuid_entry2 *best;
-
-        best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-        return best && (best->edx & bit(X86_FEATURE_LM));
-}
-
-static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
-{
-        struct kvm_cpuid_entry2 *best;
-
-        best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-        return best && (best->ecx & bit(X86_FEATURE_OSVW));
-}
-
-static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
-{
-        struct kvm_cpuid_entry2 *best;
-
-        best = kvm_find_cpuid_entry(vcpu, 1, 0);
-        return best && (best->ecx & bit(X86_FEATURE_PCID));
-}
-
-static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
-{
-        struct kvm_cpuid_entry2 *best;
-
-        best = kvm_find_cpuid_entry(vcpu, 1, 0);
-        return best && (best->ecx & bit(X86_FEATURE_X2APIC));
+        reg = guest_cpuid_get_register(vcpu, x86_feature);
+        if (!reg)
+                return false;
+
+        return *reg & bit(x86_feature);
 }
 
 static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
@@ -128,46 +112,6 @@ static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
         return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
 }
 
-static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
-{
-        struct kvm_cpuid_entry2 *best;
-
-        best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-        return best && (best->edx & bit(X86_FEATURE_GBPAGES));
-}
-
-static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
-{
-        struct kvm_cpuid_entry2 *best;
-
-        best = kvm_find_cpuid_entry(vcpu, 7, 0);
-        return best && (best->ebx & bit(X86_FEATURE_RTM));
-}
-
-static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
-{
-        struct kvm_cpuid_entry2 *best;
-
-        best = kvm_find_cpuid_entry(vcpu, 7, 0);
-        return best && (best->ebx & bit(X86_FEATURE_MPX));
-}
-
-static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
-{
-        struct kvm_cpuid_entry2 *best;
-
-        best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-        return best && (best->edx & bit(X86_FEATURE_RDTSCP));
-}
-
-static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu)
-{
-        struct kvm_cpuid_entry2 *best;
-
-        best = kvm_find_cpuid_entry(vcpu, 0x8000000a, 0);
-        return best && (best->edx & bit(X86_FEATURE_NRIPS));
-}
-
 static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
 {
         struct kvm_cpuid_entry2 *best;
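A note on the BUILD_BUG_ONs in x86_feature_cpuid() above: they can only
fire at build time because the function is __always_inline and every
caller passes a compile-time-constant X86_FEATURE_*, so x86_feature / 32
folds to a constant the optimizer can prove in range. A rough standalone
sketch of that pattern follows (assumes GCC or clang with optimization
enabled; build_bug() and NWORDS are invented names for illustration, and
the kernel's real BUILD_BUG_ON machinery is more elaborate):

    /* An unprovable or out-of-range word leaves a call to build_bug() in
     * the object file, and the error attribute then fails the build. */
    extern void build_bug(void)
            __attribute__((error("bad X86_FEATURE_* word")));

    #define NWORDS 15       /* stand-in for ARRAY_SIZE(reverse_cpuid) */

    static inline __attribute__((always_inline))
    unsigned int check_feature_word(unsigned int feature)
    {
            unsigned int word = feature / 32;

            if (!__builtin_constant_p(word) || word >= NWORDS)
                    build_bug();    /* folds away for valid input */
            return word;
    }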
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4052,7 +4052,8 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 {
         __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
                                 cpuid_maxphyaddr(vcpu), context->root_level,
-                                context->nx, guest_cpuid_has_gbpages(vcpu),
+                                context->nx,
+                                guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
                                 is_pse(vcpu), guest_cpuid_is_amd(vcpu));
 }
 
@@ -4114,8 +4115,8 @@ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
         __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
                                 boot_cpu_data.x86_phys_bits,
                                 context->shadow_root_level, uses_nx,
-                                guest_cpuid_has_gbpages(vcpu), is_pse(vcpu),
-                                true);
+                                guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
+                                is_pse(vcpu), true);
 }
 EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
 
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -130,7 +130,7 @@ static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
          * enable MTRRs and it is obviously undesirable to run the
          * guest entirely with UC memory and we use WB.
          */
-        if (guest_cpuid_has_mtrr(vcpu))
+        if (guest_cpuid_has(vcpu, X86_FEATURE_MTRR))
                 return MTRR_TYPE_UNCACHABLE;
         else
                 return MTRR_TYPE_WRBACK;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5078,7 +5078,7 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
         struct kvm_cpuid_entry2 *entry;
 
         /* Update nrips enabled cache */
-        svm->nrips_enabled = !!guest_cpuid_has_nrips(&svm->vcpu);
+        svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
 
         if (!kvm_vcpu_apicv_active(vcpu))
                 return;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2611,7 +2611,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
                 if (index >= 0)
                         move_msr_up(vmx, index, save_nmsrs++);
                 index = __find_msr_index(vmx, MSR_TSC_AUX);
-                if (index >= 0 && guest_cpuid_has_rdtscp(&vmx->vcpu))
+                if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
                         move_msr_up(vmx, index, save_nmsrs++);
                 /*
                  * MSR_STAR is only needed on long mode guests, and only
@@ -2671,12 +2671,6 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
         }
 }
 
-static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
-{
-        struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
-        return best && (best->ecx & (1 << (X86_FEATURE_VMX & 31)));
-}
-
 /*
  * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
  * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
@@ -2685,7 +2679,7 @@ static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
  */
 static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
 {
-        return nested && guest_cpuid_has_vmx(vcpu);
+        return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
 }
 
 /*
@@ -3281,7 +3275,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 break;
         case MSR_IA32_BNDCFGS:
                 if (!kvm_mpx_supported() ||
-                    (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
+                    (!msr_info->host_initiated &&
+                     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
                         return 1;
                 msr_info->data = vmcs_read64(GUEST_BNDCFGS);
                 break;
@@ -3305,7 +3300,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 msr_info->data = vcpu->arch.ia32_xss;
                 break;
         case MSR_TSC_AUX:
-                if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
+                if (!msr_info->host_initiated &&
+                    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
                         return 1;
                 /* Otherwise falls through */
         default:
@@ -3364,7 +3360,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 break;
         case MSR_IA32_BNDCFGS:
                 if (!kvm_mpx_supported() ||
-                    (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
+                    (!msr_info->host_initiated &&
+                     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
                         return 1;
                 if (is_noncanonical_address(data & PAGE_MASK) ||
                     (data & MSR_IA32_BNDCFGS_RSVD))
@@ -3427,7 +3424,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
                 break;
         case MSR_TSC_AUX:
-                if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
+                if (!msr_info->host_initiated &&
+                    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
                         return 1;
                 /* Check reserved bit, higher 32 bits should be zero */
                 if ((data >> 32) != 0)
@@ -9622,7 +9620,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
         u32 secondary_exec_ctl = vmx_secondary_exec_control(vmx);
 
         if (vmx_rdtscp_supported()) {
-                bool rdtscp_enabled = guest_cpuid_has_rdtscp(vcpu);
+                bool rdtscp_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP);
                 if (!rdtscp_enabled)
                         secondary_exec_ctl &= ~SECONDARY_EXEC_RDTSCP;
 
@@ -9641,7 +9639,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
                 struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
                 bool invpcid_enabled =
                         best && best->ebx & bit(X86_FEATURE_INVPCID) &&
-                        guest_cpuid_has_pcid(vcpu);
+                        guest_cpuid_has(vcpu, X86_FEATURE_PCID);
 
                 if (!invpcid_enabled) {
                         secondary_exec_ctl &= ~SECONDARY_EXEC_ENABLE_INVPCID;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -310,8 +310,8 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
         u64 new_state = msr_info->data &
                 (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
-        u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) |
-                0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE);
+        u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff |
+                (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);
 
         if (!msr_info->host_initiated &&
             ((msr_info->data & reserved_bits) != 0 ||
@@ -754,19 +754,19 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
         if (cr4 & CR4_RESERVED_BITS)
                 return 1;
 
-        if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
+        if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
                 return 1;
 
-        if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
+        if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
                 return 1;
 
-        if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP))
+        if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
                 return 1;
 
-        if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE))
+        if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
                 return 1;
 
-        if (!guest_cpuid_has_pku(vcpu) && (cr4 & X86_CR4_PKE))
+        if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
                 return 1;
 
         if (is_long_mode(vcpu)) {
@@ -779,7 +779,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                         return 1;
 
                 if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
-                        if (!guest_cpuid_has_pcid(vcpu))
+                        if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID))
                                 return 1;
 
                         /* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
@@ -883,7 +883,7 @@ static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
 {
         u64 fixed = DR6_FIXED_1;
 
-        if (!guest_cpuid_has_rtm(vcpu))
+        if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM))
                 fixed |= DR6_RTM;
         return fixed;
 }
@@ -1534,8 +1534,9 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
         vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
         vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
 
-        if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
+        if (!msr->host_initiated &&
+            guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST))
                 update_ia32_tsc_adjust_msr(vcpu, offset);
 
         kvm_vcpu_write_tsc_offset(vcpu, offset);
         raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
@@ -2185,7 +2186,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 kvm_set_lapic_tscdeadline_msr(vcpu, data);
                 break;
         case MSR_IA32_TSC_ADJUST:
-                if (guest_cpuid_has_tsc_adjust(vcpu)) {
+                if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) {
                         if (!msr_info->host_initiated) {
                                 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
                                 adjust_tsc_offset_guest(vcpu, adj);
@@ -2307,12 +2308,12 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", msr, data);
                 break;
         case MSR_AMD64_OSVW_ID_LENGTH:
-                if (!guest_cpuid_has_osvw(vcpu))
+                if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
                         return 1;
                 vcpu->arch.osvw.length = data;
                 break;
         case MSR_AMD64_OSVW_STATUS:
-                if (!guest_cpuid_has_osvw(vcpu))
+                if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
                         return 1;
                 vcpu->arch.osvw.status = data;
                 break;
@@ -2537,12 +2538,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 msr_info->data = 0xbe702111;
                 break;
         case MSR_AMD64_OSVW_ID_LENGTH:
-                if (!guest_cpuid_has_osvw(vcpu))
+                if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
                         return 1;
                 msr_info->data = vcpu->arch.osvw.length;
                 break;
         case MSR_AMD64_OSVW_STATUS:
-                if (!guest_cpuid_has_osvw(vcpu))
+                if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
                         return 1;
                 msr_info->data = vcpu->arch.osvw.status;
                 break;
@@ -6606,7 +6607,7 @@ static void enter_smm(struct kvm_vcpu *vcpu)
         trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
         vcpu->arch.hflags |= HF_SMM_MASK;
         memset(buf, 0, 512);
-        if (guest_cpuid_has_longmode(vcpu))
+        if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
                 enter_smm_save_state_64(vcpu, buf);
         else
                 enter_smm_save_state_32(vcpu, buf);
@@ -6658,7 +6659,7 @@ static void enter_smm(struct kvm_vcpu *vcpu)
         kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
         kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
 
-        if (guest_cpuid_has_longmode(vcpu))
+        if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
                 kvm_x86_ops->set_efer(vcpu, 0);
 
         kvm_update_cpuid(vcpu);
@@ -7424,7 +7425,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
         int pending_vec, max_bits, idx;
         struct desc_ptr dt;
 
-        if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
+        if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
+            (sregs->cr4 & X86_CR4_OSXSAVE))
                 return -EINVAL;
 
         dt.size = sregs->idt.limit;