KVM: x86: Move .pmu_ops to kvm_x86_init_ops and tag as __initdata
The pmu_ops should be moved to kvm_x86_init_ops and tagged as __initdata. That'll save those precious few bytes, and more importantly make the original ops unreachable, i.e. make it harder to sneak in post-init modification bugs.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Like Xu <likexu@tencent.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220329235054.3534728-4-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
8f969c0c34
commit
34886e796c
@ -1465,8 +1465,6 @@ struct kvm_x86_ops {
|
||||
int cpu_dirty_log_size;
|
||||
void (*update_cpu_dirty_logging)(struct kvm_vcpu *vcpu);
|
||||
|
||||
/* pmu operations of sub-arch */
|
||||
const struct kvm_pmu_ops *pmu_ops;
|
||||
const struct kvm_x86_nested_ops *nested_ops;
|
||||
|
||||
void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
|
||||
@ -1542,6 +1540,7 @@ struct kvm_x86_init_ops {
|
||||
unsigned int (*handle_intel_pt_intr)(void);
|
||||
|
||||
struct kvm_x86_ops *runtime_ops;
|
||||
struct kvm_pmu_ops *pmu_ops;
|
||||
};
|
||||
|
||||
struct kvm_arch_async_pf {
|
||||
|
@ -319,7 +319,7 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu)
|
||||
}
|
||||
}
|
||||
|
||||
struct kvm_pmu_ops amd_pmu_ops = {
|
||||
struct kvm_pmu_ops amd_pmu_ops __initdata = {
|
||||
.pmc_perf_hw_id = amd_pmc_perf_hw_id,
|
||||
.pmc_is_enabled = amd_pmc_is_enabled,
|
||||
.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
|
||||
|
@ -4694,7 +4694,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
|
||||
|
||||
.sched_in = svm_sched_in,
|
||||
|
||||
.pmu_ops = &amd_pmu_ops,
|
||||
.nested_ops = &svm_nested_ops,
|
||||
|
||||
.deliver_interrupt = svm_deliver_interrupt,
|
||||
@ -4988,6 +4987,7 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = {
|
||||
.check_processor_compatibility = svm_check_processor_compat,
|
||||
|
||||
.runtime_ops = &svm_x86_ops,
|
||||
.pmu_ops = &amd_pmu_ops,
|
||||
};
|
||||
|
||||
static int __init svm_init(void)
|
||||
|
@ -723,7 +723,7 @@ static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
|
||||
intel_pmu_release_guest_lbr_event(vcpu);
|
||||
}
|
||||
|
||||
struct kvm_pmu_ops intel_pmu_ops = {
|
||||
struct kvm_pmu_ops intel_pmu_ops __initdata = {
|
||||
.pmc_perf_hw_id = intel_pmc_perf_hw_id,
|
||||
.pmc_is_enabled = intel_pmc_is_enabled,
|
||||
.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
|
||||
|
@ -7816,7 +7816,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
|
||||
.cpu_dirty_log_size = PML_ENTITY_NUM,
|
||||
.update_cpu_dirty_logging = vmx_update_cpu_dirty_logging,
|
||||
|
||||
.pmu_ops = &intel_pmu_ops,
|
||||
.nested_ops = &vmx_nested_ops,
|
||||
|
||||
.pi_update_irte = vmx_pi_update_irte,
|
||||
@ -8070,6 +8069,7 @@ static struct kvm_x86_init_ops vmx_init_ops __initdata = {
|
||||
.handle_intel_pt_intr = NULL,
|
||||
|
||||
.runtime_ops = &vmx_x86_ops,
|
||||
.pmu_ops = &intel_pmu_ops,
|
||||
};
|
||||
|
||||
static void vmx_cleanup_l1d_flush(void)
|
||||
|
@ -11633,7 +11633,7 @@ static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)
|
||||
#include <asm/kvm-x86-ops.h>
|
||||
#undef __KVM_X86_OP
|
||||
|
||||
kvm_pmu_ops_update(ops->runtime_ops->pmu_ops);
|
||||
kvm_pmu_ops_update(ops->pmu_ops);
|
||||
}
|
||||
|
||||
int kvm_arch_hardware_setup(void *opaque)
|
||||
|
Loading…
Reference in New Issue
Block a user