mirror of
https://github.com/torvalds/linux.git
synced 2024-11-22 12:11:40 +00:00
KVM: use yield_to instead of sleep in kvm_vcpu_on_spin
Instead of sleeping in kvm_vcpu_on_spin, which can cause gigantic slowdowns of certain workloads, we instead use yield_to to get another VCPU in the same KVM guest to run sooner. This seems to give a 10-15% speedup in certain workloads.

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
This commit is contained in:
parent
34bb10b79d
commit
217ece6129
@@ -235,6 +235,7 @@ struct kvm {
 #endif
 	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
 	atomic_t online_vcpus;
+	int last_boosted_vcpu;
 	struct list_head vm_list;
 	struct mutex lock;
 	struct kvm_io_bus *buses[KVM_NR_BUSES];
@@ -1484,18 +1484,55 @@ void kvm_resched(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_resched);
 
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
|
void kvm_vcpu_on_spin(struct kvm_vcpu *me)
|
||||||
{
|
{
|
||||||
ktime_t expires;
|
struct kvm *kvm = me->kvm;
|
||||||
DEFINE_WAIT(wait);
|
struct kvm_vcpu *vcpu;
|
||||||
|
int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
|
||||||
|
int yielded = 0;
|
||||||
|
int pass;
|
||||||
|
int i;
|
||||||
|
|
||||||
prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
|
/*
|
||||||
|
* We boost the priority of a VCPU that is runnable but not
|
||||||
/* Sleep for 100 us, and hope lock-holder got scheduled */
|
* currently running, because it got preempted by something
|
||||||
expires = ktime_add_ns(ktime_get(), 100000UL);
|
* else and called schedule in __vcpu_run. Hopefully that
|
||||||
schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
|
* VCPU is holding the lock that we need and will release it.
|
||||||
|
* We approximate round-robin by starting at the last boosted VCPU.
|
||||||
finish_wait(&vcpu->wq, &wait);
|
*/
|
||||||
|
for (pass = 0; pass < 2 && !yielded; pass++) {
|
||||||
|
kvm_for_each_vcpu(i, vcpu, kvm) {
|
||||||
|
struct task_struct *task = NULL;
|
||||||
|
struct pid *pid;
|
||||||
|
if (!pass && i < last_boosted_vcpu) {
|
||||||
|
i = last_boosted_vcpu;
|
||||||
|
continue;
|
||||||
|
} else if (pass && i > last_boosted_vcpu)
|
||||||
|
break;
|
||||||
|
if (vcpu == me)
|
||||||
|
continue;
|
||||||
|
if (waitqueue_active(&vcpu->wq))
|
||||||
|
continue;
|
||||||
|
rcu_read_lock();
|
||||||
|
pid = rcu_dereference(vcpu->pid);
|
||||||
|
if (pid)
|
||||||
|
task = get_pid_task(vcpu->pid, PIDTYPE_PID);
|
||||||
|
rcu_read_unlock();
|
||||||
|
if (!task)
|
||||||
|
continue;
|
||||||
|
if (task->flags & PF_VCPU) {
|
||||||
|
put_task_struct(task);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if (yield_to(task, 1)) {
|
||||||
|
put_task_struct(task);
|
||||||
|
kvm->last_boosted_vcpu = i;
|
||||||
|
yielded = 1;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
put_task_struct(task);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
|
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user