KVM: Use cond_resched() directly and remove useless kvm_resched()
Since commit 15ad7146 ("KVM: Use the scheduler preemption notifiers
to make kvm preemptible"), all that is left in kvm_resched() is a simple
cond_resched() call with an extra need_resched() check that was once
there to avoid descheduling the VCPU unnecessarily.  The check is now
meaningless, so call cond_resched() directly and remove kvm_resched().
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit c08ac06ab3
parent 6bb05ef785
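For reference, kvm_resched() as it existed before this change (its body is exactly what the @@ -1710 hunk below removes) was nothing more than a guarded call to cond_resched(). Since cond_resched() itself only yields when a reschedule is actually pending, the extra need_resched() check is redundant; the comment below is editorial, not part of the original source:

/*
 * The wrapper removed by this commit.  cond_resched() already checks
 * whether a reschedule is due before yielding the CPU, so the explicit
 * need_resched() guard here saves at most a function call and has no
 * effect on behaviour.
 */
void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}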
@@ -702,7 +702,7 @@ again:
 out:
 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	if (r > 0) {
-		kvm_resched(vcpu);
+		cond_resched();
 		idx = srcu_read_lock(&vcpu->kvm->srcu);
 		goto again;
 	}
@@ -1348,7 +1348,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 	kvm_guest_exit();
 
 	preempt_enable();
-	kvm_resched(vcpu);
+	cond_resched();
 
 	spin_lock(&vc->lock);
 	now = get_tb();
@@ -6125,7 +6125,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 		}
 		if (need_resched()) {
 			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
-			kvm_resched(vcpu);
+			cond_resched();
 			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 		}
 	}
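Both run-loop hunks above bracket the reschedule point with srcu_read_unlock()/srcu_read_lock(). A plausible reading, shown as a minimal sketch below (the function name vcpu_resched_point() is hypothetical and the surrounding guest-entry logic is elided), is that the vcpu thread leaves its SRCU read-side critical section before it may sleep in cond_resched(), so that writers waiting in synchronize_srcu() are not held up across the sleep:

/*
 * Sketch only: the reschedule point of a VCPU run loop, reduced to the
 * locking pattern visible in the hunk above.  Not the literal kernel
 * function; error handling and guest entry/exit are omitted.
 */
static void vcpu_resched_point(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	if (need_resched()) {
		/* Drop the SRCU read-side lock before possibly sleeping so a
		 * concurrent synchronize_srcu() is not delayed by this vcpu. */
		srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
		cond_resched();
		vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
	}
}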
@@ -583,7 +583,6 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
-void kvm_resched(struct kvm_vcpu *vcpu);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
 
@@ -1710,14 +1710,6 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
 #endif /* !CONFIG_S390 */
 
-void kvm_resched(struct kvm_vcpu *vcpu)
-{
-	if (!need_resched())
-		return;
-	cond_resched();
-}
-EXPORT_SYMBOL_GPL(kvm_resched);
-
 bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
 {
 	struct pid *pid;