locking/pvstat: Separate wait_again and spurious wakeup stats
Currently there is overlap between the pvqspinlock wait_again and
spurious_wakeup stat counters. Because of lock stealing, it is no longer
possible to accurately determine whether a spurious wakeup has happened at
the queue head. As the counters track both queue node and queue head
status, it is also hard to tell how many of those events come from the
queue head and how many from the queue node.

This patch changes the accounting rules so that spurious wakeup is only
tracked in the queue node. The wait_again count, in turn, is only tracked
in the queue head, when the vCPU fails to acquire the lock after a vCPU
kick. This should give a much better indication of the wait-kick dynamics
at the queue node and the queue head.

Signed-off-by: Waiman Long <Waiman.Long@hpe.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Douglas Hatch <doug.hatch@hpe.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Pan Xinhui <xinhui@linux.vnet.ibm.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Scott J Norton <scott.norton@hpe.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1464713631-1066-2-git-send-email-Waiman.Long@hpe.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 08be8f63c4
parent 64a5e3cb30
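
Context for the diff below: qstat_inc() is the conditional stat-counter
helper from kernel/locking/qspinlock_stat.h. The following is a minimal
userspace sketch of the idiom, not the kernel implementation (which uses
this_cpu_inc() on per-CPU counters and compiles to nothing without
CONFIG_QUEUED_LOCK_STAT); it illustrates why passing waitcnt as the
condition counts only waits after the first one:

#include <stdbool.h>
#include <stdio.h>

/* Stat indices, mirroring a subset of the enum in qspinlock_stat.h. */
enum qlock_stats {
	qstat_pv_spurious_wakeup,
	qstat_pv_wait_again,
	qstat_pv_wait_node,
	qstat_num
};

/* The kernel uses per-CPU counters; a flat array stands in here. */
static unsigned long qstats[qstat_num];

/* Increment a counter only when @cond is true; in the kernel this
 * helper compiles away entirely when CONFIG_QUEUED_LOCK_STAT is off.
 */
static inline void qstat_inc(enum qlock_stats stat, bool cond)
{
	if (cond)
		qstats[stat]++;
}

int main(void)
{
	int waitcnt = 2;	/* pretend this is the third wait iteration */

	qstat_inc(qstat_pv_wait_node, true);	 /* always counted          */
	qstat_inc(qstat_pv_wait_again, waitcnt); /* counted only on re-wait */
	printf("wait_node=%lu wait_again=%lu\n",
	       qstats[qstat_pv_wait_node], qstats[qstat_pv_wait_again]);
	return 0;
}

Because pv_wait_head_or_lock() keeps its own waitcnt and wait_again
increment, removing the call from pv_wait_node() below is what confines
wait_again to the queue head.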
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -288,12 +288,10 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
 {
 	struct pv_node *pn = (struct pv_node *)node;
 	struct pv_node *pp = (struct pv_node *)prev;
-	int waitcnt = 0;
 	int loop;
 	bool wait_early;
 
-	/* waitcnt processing will be compiled out if !QUEUED_LOCK_STAT */
-	for (;; waitcnt++) {
+	for (;;) {
 		for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) {
 			if (READ_ONCE(node->locked))
 				return;
@@ -317,7 +315,6 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
 
 		if (!READ_ONCE(node->locked)) {
 			qstat_inc(qstat_pv_wait_node, true);
-			qstat_inc(qstat_pv_wait_again, waitcnt);
 			qstat_inc(qstat_pv_wait_early, wait_early);
 			pv_wait(&pn->state, vcpu_halted);
 		}
@@ -458,12 +455,9 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
 		pv_wait(&l->locked, _Q_SLOW_VAL);
 
 		/*
-		 * The unlocker should have freed the lock before kicking the
-		 * CPU. So if the lock is still not free, it is a spurious
-		 * wakeup or another vCPU has stolen the lock. The current
-		 * vCPU should spin again.
+		 * Because of lock stealing, the queue head vCPU may not be
+		 * able to acquire the lock before it has to wait again.
 		 */
-		qstat_inc(qstat_pv_spurious_wakeup, READ_ONCE(l->locked));
 	}
 
 	/*
--- a/kernel/locking/qspinlock_stat.h
+++ b/kernel/locking/qspinlock_stat.h
@@ -24,8 +24,8 @@
  * pv_latency_wake	- average latency (ns) from vCPU kick to wakeup
  * pv_lock_slowpath	- # of locking operations via the slowpath
  * pv_lock_stealing	- # of lock stealing operations
- * pv_spurious_wakeup	- # of spurious wakeups
- * pv_wait_again	- # of vCPU wait's that happened after a vCPU kick
+ * pv_spurious_wakeup	- # of spurious wakeups in non-head vCPUs
+ * pv_wait_again	- # of wait's after a queue head vCPU kick
  * pv_wait_early	- # of early vCPU wait's
  * pv_wait_head		- # of vCPU wait's at the queue head
  * pv_wait_node		- # of vCPU wait's at a non-head queue node
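
With CONFIG_QUEUED_LOCK_STAT=y, these counters are exported as debugfs
files, so the effect of the new accounting split can be observed at
runtime. Below is a hypothetical userspace reader, assuming debugfs is
mounted at /sys/kernel/debug and the qlockstat directory name documented
in qspinlock_stat.h; it is an illustration, not part of this patch:

/* Hypothetical helper that prints the two counters this patch
 * disentangles. Assumes CONFIG_QUEUED_LOCK_STAT=y and debugfs
 * mounted at /sys/kernel/debug; file names match qspinlock_stat.h.
 */
#include <stdio.h>

static void print_stat(const char *name)
{
	char path[128];
	FILE *f;
	unsigned long long val;

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/qlockstat/%s", name);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return;
	}
	if (fscanf(f, "%llu", &val) == 1)
		printf("%-20s %llu\n", name, val);
	fclose(f);
}

int main(void)
{
	print_stat("pv_spurious_wakeup");	/* non-head vCPUs only, after this patch */
	print_stat("pv_wait_again");		/* queue head re-waits after a kick     */
	return 0;
}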