forked from Minki/linux
KVM: Note down when cpu relax intercepted or pause loop exited
Noting which vcpu took a pause-loop exit or had a cpu-relax instruction intercepted helps in filtering the right candidate to yield to. Wrong selection of a vcpu — i.e., a vcpu that just did a PL-exit or had cpu relax intercepted — may contribute to performance degradation. Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com> Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com> Reviewed-by: Rik van Riel <riel@redhat.com> Tested-by: Christian Borntraeger <borntraeger@de.ibm.com> # on s390x Signed-off-by: Avi Kivity <avi@redhat.com>
This commit is contained in:
parent
f2a7434731
commit
4c088493c8
@ -183,6 +183,18 @@ struct kvm_vcpu {
|
||||
} async_pf;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
|
||||
/*
|
||||
* Cpu relax intercept or pause loop exit optimization
|
||||
* in_spin_loop: set when a vcpu does a pause loop exit
|
||||
* or cpu relax intercepted.
|
||||
* dy_eligible: indicates whether vcpu is eligible for directed yield.
|
||||
*/
|
||||
struct {
|
||||
bool in_spin_loop;
|
||||
bool dy_eligible;
|
||||
} spin_loop;
|
||||
#endif
|
||||
struct kvm_vcpu_arch arch;
|
||||
};
|
||||
|
||||
@ -898,5 +910,27 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
|
||||
|
||||
static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
|
||||
{
|
||||
vcpu->spin_loop.in_spin_loop = val;
|
||||
}
|
||||
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
|
||||
{
|
||||
vcpu->spin_loop.dy_eligible = val;
|
||||
}
|
||||
|
||||
#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
|
||||
|
||||
/*
 * No-op stub: without CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT the vcpu has
 * no spin_loop state to record, so callers compile to nothing.
 */
static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}
|
||||
|
||||
/*
 * No-op stub: directed-yield eligibility is not tracked when
 * CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT is disabled.
 */
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
|
||||
|
||||
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
|
||||
#endif
|
||||
|
||||
|
@ -239,6 +239,9 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
|
||||
}
|
||||
vcpu->run = page_address(page);
|
||||
|
||||
kvm_vcpu_set_in_spin_loop(vcpu, false);
|
||||
kvm_vcpu_set_dy_eligible(vcpu, false);
|
||||
|
||||
r = kvm_arch_vcpu_init(vcpu);
|
||||
if (r < 0)
|
||||
goto fail_free_run;
|
||||
@ -1585,6 +1588,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
|
||||
int pass;
|
||||
int i;
|
||||
|
||||
kvm_vcpu_set_in_spin_loop(me, true);
|
||||
/*
|
||||
* We boost the priority of a VCPU that is runnable but not
|
||||
* currently running, because it got preempted by something
|
||||
@ -1610,6 +1614,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
|
||||
}
|
||||
}
|
||||
}
|
||||
kvm_vcpu_set_in_spin_loop(me, false);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user