mirror of
https://github.com/torvalds/linux.git
synced 2024-12-28 13:51:44 +00:00
fb346fd9fc
The QUEUED_LOCK_STAT option to report queued spinlocks event counts was previously allowed only on x86 architecture. To make the locking event counting code more useful, it is now renamed to a more generic LOCK_EVENT_COUNTS config option. This new option will be available to all the architectures that use qspinlock at the moment. Other locking code can now start to use the generic locking event counting code by including lock_events.h and put the new locking event names into the lock_events_list.h header file. My experience with lock event counting is that it gives valuable insight on how the locking code works and what can be done to make it better. I would like to extend this benefit to other locking code like mutex and rwsem in the near future. The PV qspinlock specific code will stay in qspinlock_stat.h. The locking event counters will now reside in the <debugfs>/lock_event_counts directory. Signed-off-by: Waiman Long <longman@redhat.com> Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Acked-by: Davidlohr Bueso <dbueso@suse.de> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Borislav Petkov <bp@alien8.de> Cc: Davidlohr Bueso <dave@stgolabs.net> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tim Chen <tim.c.chen@linux.intel.com> Cc: Will Deacon <will.deacon@arm.com> Link: http://lkml.kernel.org/r/20190404174320.22416-9-longman@redhat.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
60 lines · 1.6 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Waiman Long <longman@redhat.com>
 */

#ifndef __LOCKING_LOCK_EVENTS_H
#define __LOCKING_LOCK_EVENTS_H

/*
 * The event IDs are generated from lock_events_list.h: each LOCK_EVENT()
 * entry in that file expands to one LOCKEVENT_<name> enumerator here.
 */
enum lock_events {

#include "lock_events_list.h"

	lockevent_num,	/* Total number of lock event counts */
	LOCKEVENT_reset_cnts = lockevent_num,
};

#ifdef CONFIG_LOCK_EVENT_COUNTS
/*
 * Per-cpu counters, one slot per lock event.
 */
DECLARE_PER_CPU(unsigned long, lockevents[lockevent_num]);

/*
 * Increment the given lock event counter if @cond is true.
 * (Generic lock event counting — no longer PV qspinlock specific.)
 */
static inline void __lockevent_inc(enum lock_events event, bool cond)
{
	if (cond)
		__this_cpu_inc(lockevents[event]);
}

#define lockevent_inc(ev)	  __lockevent_inc(LOCKEVENT_ ##ev, true)
#define lockevent_cond_inc(ev, c) __lockevent_inc(LOCKEVENT_ ##ev, c)

/*
 * Add @inc to the given lock event counter.
 */
static inline void __lockevent_add(enum lock_events event, int inc)
{
	__this_cpu_add(lockevents[event], inc);
}

#define lockevent_add(ev, c)	__lockevent_add(LOCKEVENT_ ##ev, c)

#else  /* CONFIG_LOCK_EVENT_COUNTS */

/*
 * Event counting disabled: the accessor macros compile to nothing so
 * callers incur zero overhead.
 */
#define lockevent_inc(ev)
#define lockevent_add(ev, c)
#define lockevent_cond_inc(ev, c)

#endif /* CONFIG_LOCK_EVENT_COUNTS */
#endif /* __LOCKING_LOCK_EVENTS_H */