KVM: x86: SVM: drop preempt-safe wrappers for avic_vcpu_load/put
Now that these functions are always called with preemption disabled, remove the preempt_disable()/preempt_enable() pair inside them.

No functional change intended.

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20220606180829.102503-8-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 18869f26df
commit ba8ec27324
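For readers unfamiliar with the pattern: before this commit, avic_vcpu_load()/avic_vcpu_put() were thin wrappers that disabled preemption (via get_cpu()/preempt_disable()) around the __-prefixed workers; since every remaining caller already runs with preemption disabled, the wrappers are redundant and the workers take over their names. Below is a minimal userspace sketch of that refactoring pattern, not kernel code; the names (example_vcpu_load, etc.) and the stubbed preempt_disable()/get_cpu() helpers are illustrative stand-ins only.

/*
 * Minimal userspace sketch (NOT the kernel code) of the refactoring in this
 * commit: once every caller already runs with preemption disabled, a
 * preempt-safe wrapper around the per-CPU worker is redundant and can be
 * dropped. preempt_disable()/preempt_enable()/get_cpu()/put_cpu() are
 * stubbed here; in the kernel they are real scheduling primitives.
 */
#include <stdio.h>

static int stub_preempt_count;

static void preempt_disable(void) { stub_preempt_count++; }
static void preempt_enable(void)  { stub_preempt_count--; }
static int  get_cpu(void)         { preempt_disable(); return 0; }
static void put_cpu(void)         { preempt_enable(); }

/* The worker: programs a (pretend) per-CPU table entry for a vCPU. */
static void example_vcpu_load(int vcpu_id, int cpu)
{
	printf("load vcpu %d on cpu %d (preempt_count=%d)\n",
	       vcpu_id, cpu, stub_preempt_count);
}

/* Old style: a wrapper disables preemption on behalf of its callers. */
static void example_vcpu_load_wrapped(int vcpu_id)
{
	int cpu = get_cpu();

	example_vcpu_load(vcpu_id, cpu);
	put_cpu();
}

int main(void)
{
	/* Before: a possibly-preemptible caller goes through the wrapper. */
	example_vcpu_load_wrapped(0);

	/*
	 * After: callers that already hold preemption off (as svm_vcpu_load()
	 * does in the diff below) call the worker directly with their CPU.
	 */
	preempt_disable();
	example_vcpu_load(0, 0);
	preempt_enable();

	return 0;
}

The diff below applies the same transformation to the real avic_vcpu_load()/avic_vcpu_put().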
arch/x86/kvm/svm/avic.c
@@ -946,7 +946,7 @@ out:
 	return ret;
 }

-void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	u64 entry;
 	int h_physical_id = kvm_cpu_get_apicid(cpu);
@@ -978,7 +978,7 @@ void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
 }

-void __avic_vcpu_put(struct kvm_vcpu *vcpu)
+void avic_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	u64 entry;
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -997,25 +997,6 @@ void __avic_vcpu_put(struct kvm_vcpu *vcpu)
 	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
 }

-static void avic_vcpu_load(struct kvm_vcpu *vcpu)
-{
-	int cpu = get_cpu();
-
-	WARN_ON(cpu != vcpu->cpu);
-
-	__avic_vcpu_load(vcpu, cpu);
-
-	put_cpu();
-}
-
-static void avic_vcpu_put(struct kvm_vcpu *vcpu)
-{
-	preempt_disable();
-
-	__avic_vcpu_put(vcpu);
-
-	preempt_enable();
-}
-
 void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 {
@@ -1042,7 +1023,7 @@ void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 	vmcb_mark_dirty(vmcb, VMCB_AVIC);

 	if (activated)
-		avic_vcpu_load(vcpu);
+		avic_vcpu_load(vcpu, vcpu->cpu);
 	else
 		avic_vcpu_put(vcpu);

@@ -1075,5 +1056,5 @@ void avic_vcpu_unblocking(struct kvm_vcpu *vcpu)
 	if (!kvm_vcpu_apicv_active(vcpu))
 		return;

-	avic_vcpu_load(vcpu);
+	avic_vcpu_load(vcpu, vcpu->cpu);
 }
arch/x86/kvm/svm/svm.c
@@ -1400,13 +1400,13 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		indirect_branch_prediction_barrier();
 	}
 	if (kvm_vcpu_apicv_active(vcpu))
-		__avic_vcpu_load(vcpu, cpu);
+		avic_vcpu_load(vcpu, cpu);
 }

 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	if (kvm_vcpu_apicv_active(vcpu))
-		__avic_vcpu_put(vcpu);
+		avic_vcpu_put(vcpu);

 	svm_prepare_host_switch(vcpu);

arch/x86/kvm/svm/svm.h
@@ -610,8 +610,8 @@ void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb);
 int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
 int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
 int avic_init_vcpu(struct vcpu_svm *svm);
-void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
-void __avic_vcpu_put(struct kvm_vcpu *vcpu);
+void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+void avic_vcpu_put(struct kvm_vcpu *vcpu);
 void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
 void avic_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
 void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);