KVM: nVMX: Expose load IA32_PERF_GLOBAL_CTRL VM-{Entry,Exit} control
The "load IA32_PERF_GLOBAL_CTRL" bit for VM-entry and VM-exit should only
be exposed to the guest if IA32_PERF_GLOBAL_CTRL is a valid MSR. Create a
new helper to allow pmu_refresh() to update the VM-Entry and VM-Exit
controls to ensure PMU values are initialized when performing the
is_valid_msr() check.

Suggested-by: Jim Mattson <jmattson@google.com>
Co-developed-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Signed-off-by: Oliver Upton <oupton@google.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Reviewed-by: Peter Shier <pshier@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
71f7347025
commit
03a8871add
@ -4359,6 +4359,27 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
|
||||||
|
{
|
||||||
|
struct vcpu_vmx *vmx;
|
||||||
|
|
||||||
|
if (!nested_vmx_allowed(vcpu))
|
||||||
|
return;
|
||||||
|
|
||||||
|
vmx = to_vmx(vcpu);
|
||||||
|
if (kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL)) {
|
||||||
|
vmx->nested.msrs.entry_ctls_high |=
|
||||||
|
VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
|
||||||
|
vmx->nested.msrs.exit_ctls_high |=
|
||||||
|
VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
|
||||||
|
} else {
|
||||||
|
vmx->nested.msrs.entry_ctls_high &=
|
||||||
|
~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
|
||||||
|
vmx->nested.msrs.exit_ctls_high &=
|
||||||
|
~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
|
static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
|
||||||
{
|
{
|
||||||
gva_t gva;
|
gva_t gva;
|
||||||
|
@ -22,6 +22,7 @@ int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
|
|||||||
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
|
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
|
||||||
int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
|
int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
|
||||||
u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
|
u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
|
||||||
|
void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu);
|
||||||
|
|
||||||
static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
|
static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
|
@ -15,6 +15,7 @@
|
|||||||
#include "x86.h"
|
#include "x86.h"
|
||||||
#include "cpuid.h"
|
#include "cpuid.h"
|
||||||
#include "lapic.h"
|
#include "lapic.h"
|
||||||
|
#include "nested.h"
|
||||||
#include "pmu.h"
|
#include "pmu.h"
|
||||||
|
|
||||||
static struct kvm_event_hw_type_mapping intel_arch_events[] = {
|
static struct kvm_event_hw_type_mapping intel_arch_events[] = {
|
||||||
@ -335,6 +336,8 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
|
|||||||
0, pmu->nr_arch_gp_counters);
|
0, pmu->nr_arch_gp_counters);
|
||||||
bitmap_set(pmu->all_valid_pmc_idx,
|
bitmap_set(pmu->all_valid_pmc_idx,
|
||||||
INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
|
INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
|
||||||
|
|
||||||
|
nested_vmx_pmu_entry_exit_ctls_update(vcpu);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void intel_pmu_init(struct kvm_vcpu *vcpu)
|
static void intel_pmu_init(struct kvm_vcpu *vcpu)
|
||||||
|
Loading…
Reference in New Issue
Block a user