Commit ff7138813a

lockevent_read() has a __weak definition and the only caller in
kernel/locking/lock_events.c, plus a strong definition in qspinlock_stat.h
that overrides it, but no other declaration. This causes a W=1 warning:

  kernel/locking/lock_events.c:61:16: error: no previous prototype for 'lockevent_read' [-Werror=missing-prototypes]

Add a shared prototype to avoid the warnings.

Link: https://lkml.kernel.org/r/20230517131102.934196-7-arnd@kernel.org
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Eric Paris <eparis@redhat.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Moore <paul@paul-moore.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rafael J. Wysocki <rafael@kernel.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <longman@redhat.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
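To make the warning and the fix concrete: the __weak definition referred to above is a debugfs read handler that sums the per-cpu lockevents[] counters, and the prototype added to the header shown below gives it the previous declaration that -Wmissing-prototypes asks for. The following is only a sketch of that shape under stated assumptions; the buffer size, the event ID stored in the inode's i_private, and the error handling are illustrative, not a verbatim copy of kernel/locking/lock_events.c.

#include <linux/fs.h>
#include <linux/percpu.h>

#include "lock_events.h"

/*
 * Sketch of a __weak lockevent_read() along the lines of the fallback in
 * kernel/locking/lock_events.c.  qspinlock_stat.h can override it with a
 * strong definition.  The details here are illustrative assumptions.
 */
ssize_t __weak lockevent_read(struct file *file, char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	char buf[64];
	int cpu, id, len;
	u64 sum = 0;

	/* Assume the event ID is stashed in the debugfs inode's private data. */
	id = (long)file_inode(file)->i_private;
	if (id >= lockevent_num)
		return -EBADF;

	/* Sum the per-cpu counters declared in lock_events.h. */
	for_each_possible_cpu(cpu)
		sum += per_cpu(lockevents[id], cpu);

	len = snprintf(buf, sizeof(buf), "%llu\n", sum);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

With the ssize_t lockevent_read(...) declaration visible from lock_events.h, both this weak fallback and the strong override in qspinlock_stat.h see the same prototype, which is what silences the W=1 warning.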
65 lines · 1.8 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Waiman Long <longman@redhat.com>
 */

#ifndef __LOCKING_LOCK_EVENTS_H
#define __LOCKING_LOCK_EVENTS_H

enum lock_events {

#include "lock_events_list.h"

	lockevent_num,	/* Total number of lock event counts */
	LOCKEVENT_reset_cnts = lockevent_num,
};

#ifdef CONFIG_LOCK_EVENT_COUNTS
/*
 * Per-cpu counters
 */
DECLARE_PER_CPU(unsigned long, lockevents[lockevent_num]);

/*
 * Increment the statistical counters. Use raw_cpu_inc() because of its
 * lower overhead; we don't care if we lose the occasional update.
 */
static inline void __lockevent_inc(enum lock_events event, bool cond)
{
	if (cond)
		raw_cpu_inc(lockevents[event]);
}

#define lockevent_inc(ev)	  __lockevent_inc(LOCKEVENT_ ##ev, true)
#define lockevent_cond_inc(ev, c) __lockevent_inc(LOCKEVENT_ ##ev, c)

static inline void __lockevent_add(enum lock_events event, int inc)
{
	raw_cpu_add(lockevents[event], inc);
}

#define lockevent_add(ev, c)	__lockevent_add(LOCKEVENT_ ##ev, c)

#else /* CONFIG_LOCK_EVENT_COUNTS */

#define lockevent_inc(ev)
#define lockevent_add(ev, c)
#define lockevent_cond_inc(ev, c)

#endif /* CONFIG_LOCK_EVENT_COUNTS */

ssize_t lockevent_read(struct file *file, char __user *user_buf,
		       size_t count, loff_t *ppos);

#endif /* __LOCKING_LOCK_EVENTS_H */
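For context on how this header is consumed (a sketch, not part of the file above): lock_events_list.h is an X-macro list, where each LOCK_EVENT(name) entry expands to an enum value LOCKEVENT_<name> once the file is #included inside enum lock_events, and lock implementations bump the counters through the lockevent_*() macros, which compile away to nothing when CONFIG_LOCK_EVENT_COUNTS is off. The event names, the example_lock type, and example_trylock() below are hypothetical illustrations, not the kernel's actual event list or API.

/* --- lock_events_list.h style (sketch): one LOCK_EVENT() per counter --- */
#ifndef LOCK_EVENT
#define LOCK_EVENT(name)	LOCKEVENT_ ## name,
#endif

LOCK_EVENT(lock_pending)	/* example event names, not the real list */
LOCK_EVENT(lock_slowpath)
LOCK_EVENT(lock_spins)

/*
 * When lock_events.h #includes that list inside "enum lock_events { ... }",
 * the entries above expand to LOCKEVENT_lock_pending, LOCKEVENT_lock_slowpath,
 * LOCKEVENT_lock_spins, followed by lockevent_num, so lockevent_num ends up
 * equal to the number of defined events.
 */

/* --- a lock implementation (sketch) bumping the counters --- */
#include "lock_events.h"

struct example_lock {				/* hypothetical lock type   */
	bool pending;
};

bool example_trylock(struct example_lock *lock);	/* hypothetical helper */

static void example_lock_slowpath(struct example_lock *lock)
{
	int spins = 0;

	lockevent_inc(lock_slowpath);			 /* unconditional count */
	lockevent_cond_inc(lock_pending, lock->pending); /* count only if true  */

	while (!example_trylock(lock))
		spins++;

	lockevent_add(lock_spins, spins);		 /* add a whole delta   */
}

Because the counters are per-cpu and updated with raw_cpu_inc()/raw_cpu_add(), the locking fast paths stay cheap; a reader such as lockevent_read() then sums the per-cpu values to report a total, as in the sketch after the commit message above.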