Mirror of https://github.com/torvalds/linux.git (synced 2024-12-04 01:51:34 +00:00)
ae813308f4
Two boots + a make defconfig, the first didn't have the redundant bit
in, the second did:

 lock-classes:                         1168       1169 [max: 8191]
 direct dependencies:                  7688       5812 [max: 32768]
 indirect dependencies:               25492      25937
 all direct dependencies:            220113     217512
 dependency chains:                    9005       9008 [max: 65536]
 dependency chain hlocks:             34450      34366 [max: 327680]
 in-hardirq chains:                      55         51
 in-softirq chains:                     371        378
 in-process chains:                    8579       8579
 stack-trace entries:                108073      88474 [max: 524288]
 combined max dependencies:       178738560  169094640
 max locking depth:                      15         15
 max bfs queue depth:                   320        329
 cyclic checks:                        9123       9190
 redundant checks:                                5046
 redundant links:                                 1828
 find-mask forwards checks:            2564       2599
 find-mask backwards checks:          39521      39789

So it saves nearly 2k links and a fair chunk of stack-trace entries, but
as expected, makes no real difference on the indirect dependencies. At
the same time, you see the max BFS depth increase, which is also
expected, although it could easily be boot variance -- these numbers are
not entirely stable between boots.

The down side is that the cycles in the graph become larger and thus the
reports harder to read.

XXX: do we want this as a CONFIG variable, implied by LOCKDEP_SMALL?

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Byungchul Park <byungchul.park@lge.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Nikolay Borisov <nborisov@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: akpm@linux-foundation.org
Cc: boqun.feng@gmail.com
Cc: iamjoonsoo.kim@lge.com
Cc: kernel-team@lge.com
Cc: kirill@shutemov.name
Cc: npiggin@gmail.com
Cc: walken@google.com
Link: http://lkml.kernel.org/r/20170303091338.GH6536@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
702 lines · 18 KiB · C

/*
 * kernel/lockdep_proc.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * Code for /proc/lockdep and /proc/lockdep_stats:
 *
 */
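
/*
 * Depending on the kernel configuration this file also creates
 * /proc/lockdep_chains (CONFIG_PROVE_LOCKING) and /proc/lock_stat
 * (CONFIG_LOCK_STAT); see lockdep_proc_init() at the bottom.
 */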

#include <linux/export.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/uaccess.h>
#include <asm/div64.h>

#include "lockdep_internals.h"
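
/*
 * /proc/lockdep: dump all lock classes, each followed by its immediate
 * (distance == 1) forward dependencies.  The iterator below walks the
 * global all_lock_classes list.
 */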

static void *l_next(struct seq_file *m, void *v, loff_t *pos)
{
        return seq_list_next(v, &all_lock_classes, pos);
}

static void *l_start(struct seq_file *m, loff_t *pos)
{
        return seq_list_start_head(&all_lock_classes, *pos);
}

static void l_stop(struct seq_file *m, void *v)
{
}

static void print_name(struct seq_file *m, struct lock_class *class)
{
        char str[KSYM_NAME_LEN];
        const char *name = class->name;

        if (!name) {
                name = __get_key_name(class->key, str);
                seq_printf(m, "%s", name);
        } else {
                seq_printf(m, "%s", name);
                if (class->name_version > 1)
                        seq_printf(m, "#%d", class->name_version);
                if (class->subclass)
                        seq_printf(m, "/%d", class->subclass);
        }
}

static int l_show(struct seq_file *m, void *v)
{
        struct lock_class *class = list_entry(v, struct lock_class, lock_entry);
        struct lock_list *entry;
        char usage[LOCK_USAGE_CHARS];

        if (v == &all_lock_classes) {
                seq_printf(m, "all lock classes:\n");
                return 0;
        }

        seq_printf(m, "%p", class->key);
#ifdef CONFIG_DEBUG_LOCKDEP
        seq_printf(m, " OPS:%8ld", class->ops);
#endif
#ifdef CONFIG_PROVE_LOCKING
        seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class));
        seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
#endif

        get_usage_chars(class, usage);
        seq_printf(m, " %s", usage);

        seq_printf(m, ": ");
        print_name(m, class);
        seq_puts(m, "\n");

        list_for_each_entry(entry, &class->locks_after, entry) {
                if (entry->distance == 1) {
                        seq_printf(m, " -> [%p] ", entry->class->key);
                        print_name(m, entry->class);
                        seq_puts(m, "\n");
                }
        }
        seq_puts(m, "\n");

        return 0;
}

static const struct seq_operations lockdep_ops = {
        .start  = l_start,
        .next   = l_next,
        .stop   = l_stop,
        .show   = l_show,
};

static int lockdep_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &lockdep_ops);
}

static const struct file_operations proc_lockdep_operations = {
        .open           = lockdep_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};
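
/*
 * /proc/lockdep_chains: dump every lock chain the validator has
 * recorded, i.e. each chain's irq context followed by its lock
 * classes, one per line.  The iterator indexes lock_chains[] directly.
 */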

#ifdef CONFIG_PROVE_LOCKING
static void *lc_start(struct seq_file *m, loff_t *pos)
{
        if (*pos == 0)
                return SEQ_START_TOKEN;

        if (*pos - 1 < nr_lock_chains)
                return lock_chains + (*pos - 1);

        return NULL;
}

static void *lc_next(struct seq_file *m, void *v, loff_t *pos)
{
        (*pos)++;
        return lc_start(m, pos);
}

static void lc_stop(struct seq_file *m, void *v)
{
}

static int lc_show(struct seq_file *m, void *v)
{
        struct lock_chain *chain = v;
        struct lock_class *class;
        int i;

        if (v == SEQ_START_TOKEN) {
                if (nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)
                        seq_printf(m, "(buggered) ");
                seq_printf(m, "all lock chains:\n");
                return 0;
        }

        seq_printf(m, "irq_context: %d\n", chain->irq_context);

        for (i = 0; i < chain->depth; i++) {
                class = lock_chain_get_class(chain, i);
                if (!class->key)
                        continue;

                seq_printf(m, "[%p] ", class->key);
                print_name(m, class);
                seq_puts(m, "\n");
        }
        seq_puts(m, "\n");

        return 0;
}

static const struct seq_operations lockdep_chains_ops = {
        .start  = lc_start,
        .next   = lc_next,
        .stop   = lc_stop,
        .show   = lc_show,
};

static int lockdep_chains_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &lockdep_chains_ops);
}

static const struct file_operations proc_lockdep_chains_operations = {
        .open           = lockdep_chains_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};
#endif /* CONFIG_PROVE_LOCKING */
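
/*
 * /proc/lockdep_stats: one-shot summary of the global lockdep counters.
 * lockdep_stats_debug_show() adds the counters that are only maintained
 * when CONFIG_DEBUG_LOCKDEP is enabled.
 */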

static void lockdep_stats_debug_show(struct seq_file *m)
{
#ifdef CONFIG_DEBUG_LOCKDEP
        unsigned long long hi1 = debug_atomic_read(hardirqs_on_events),
                           hi2 = debug_atomic_read(hardirqs_off_events),
                           hr1 = debug_atomic_read(redundant_hardirqs_on),
                           hr2 = debug_atomic_read(redundant_hardirqs_off),
                           si1 = debug_atomic_read(softirqs_on_events),
                           si2 = debug_atomic_read(softirqs_off_events),
                           sr1 = debug_atomic_read(redundant_softirqs_on),
                           sr2 = debug_atomic_read(redundant_softirqs_off);

        seq_printf(m, " chain lookup misses: %11llu\n",
                        debug_atomic_read(chain_lookup_misses));
        seq_printf(m, " chain lookup hits: %11llu\n",
                        debug_atomic_read(chain_lookup_hits));
        seq_printf(m, " cyclic checks: %11llu\n",
                        debug_atomic_read(nr_cyclic_checks));
        seq_printf(m, " redundant checks: %11llu\n",
                        debug_atomic_read(nr_redundant_checks));
        seq_printf(m, " redundant links: %11llu\n",
                        debug_atomic_read(nr_redundant));
        seq_printf(m, " find-mask forwards checks: %11llu\n",
                        debug_atomic_read(nr_find_usage_forwards_checks));
        seq_printf(m, " find-mask backwards checks: %11llu\n",
                        debug_atomic_read(nr_find_usage_backwards_checks));

        seq_printf(m, " hardirq on events: %11llu\n", hi1);
        seq_printf(m, " hardirq off events: %11llu\n", hi2);
        seq_printf(m, " redundant hardirq ons: %11llu\n", hr1);
        seq_printf(m, " redundant hardirq offs: %11llu\n", hr2);
        seq_printf(m, " softirq on events: %11llu\n", si1);
        seq_printf(m, " softirq off events: %11llu\n", si2);
        seq_printf(m, " redundant softirq ons: %11llu\n", sr1);
        seq_printf(m, " redundant softirq offs: %11llu\n", sr2);
#endif
}

static int lockdep_stats_show(struct seq_file *m, void *v)
{
        struct lock_class *class;
        unsigned long nr_unused = 0, nr_uncategorized = 0,
                      nr_irq_safe = 0, nr_irq_unsafe = 0,
                      nr_softirq_safe = 0, nr_softirq_unsafe = 0,
                      nr_hardirq_safe = 0, nr_hardirq_unsafe = 0,
                      nr_irq_read_safe = 0, nr_irq_read_unsafe = 0,
                      nr_softirq_read_safe = 0, nr_softirq_read_unsafe = 0,
                      nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
                      sum_forward_deps = 0;

        list_for_each_entry(class, &all_lock_classes, lock_entry) {

                if (class->usage_mask == 0)
                        nr_unused++;
                if (class->usage_mask == LOCKF_USED)
                        nr_uncategorized++;
                if (class->usage_mask & LOCKF_USED_IN_IRQ)
                        nr_irq_safe++;
                if (class->usage_mask & LOCKF_ENABLED_IRQ)
                        nr_irq_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
                        nr_softirq_safe++;
                if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ)
                        nr_softirq_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
                        nr_hardirq_safe++;
                if (class->usage_mask & LOCKF_ENABLED_HARDIRQ)
                        nr_hardirq_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
                        nr_irq_read_safe++;
                if (class->usage_mask & LOCKF_ENABLED_IRQ_READ)
                        nr_irq_read_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
                        nr_softirq_read_safe++;
                if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ_READ)
                        nr_softirq_read_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
                        nr_hardirq_read_safe++;
                if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
                        nr_hardirq_read_unsafe++;

#ifdef CONFIG_PROVE_LOCKING
                sum_forward_deps += lockdep_count_forward_deps(class);
#endif
        }
#ifdef CONFIG_DEBUG_LOCKDEP
        DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused);
#endif
        seq_printf(m, " lock-classes: %11lu [max: %lu]\n",
                        nr_lock_classes, MAX_LOCKDEP_KEYS);
        seq_printf(m, " direct dependencies: %11lu [max: %lu]\n",
                        nr_list_entries, MAX_LOCKDEP_ENTRIES);
        seq_printf(m, " indirect dependencies: %11lu\n",
                        sum_forward_deps);

        /*
         * Total number of dependencies:
         *
         * All irq-safe locks may nest inside irq-unsafe locks,
         * plus all the other known dependencies:
         */
        seq_printf(m, " all direct dependencies: %11lu\n",
                        nr_irq_unsafe * nr_irq_safe +
                        nr_hardirq_unsafe * nr_hardirq_safe +
                        nr_list_entries);

#ifdef CONFIG_PROVE_LOCKING
        seq_printf(m, " dependency chains: %11lu [max: %lu]\n",
                        nr_lock_chains, MAX_LOCKDEP_CHAINS);
        seq_printf(m, " dependency chain hlocks: %11d [max: %lu]\n",
                        nr_chain_hlocks, MAX_LOCKDEP_CHAIN_HLOCKS);
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
        seq_printf(m, " in-hardirq chains: %11u\n",
                        nr_hardirq_chains);
        seq_printf(m, " in-softirq chains: %11u\n",
                        nr_softirq_chains);
#endif
        seq_printf(m, " in-process chains: %11u\n",
                        nr_process_chains);
        seq_printf(m, " stack-trace entries: %11lu [max: %lu]\n",
                        nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
        seq_printf(m, " combined max dependencies: %11u\n",
                        (nr_hardirq_chains + 1) *
                        (nr_softirq_chains + 1) *
                        (nr_process_chains + 1)
        );
        seq_printf(m, " hardirq-safe locks: %11lu\n",
                        nr_hardirq_safe);
        seq_printf(m, " hardirq-unsafe locks: %11lu\n",
                        nr_hardirq_unsafe);
        seq_printf(m, " softirq-safe locks: %11lu\n",
                        nr_softirq_safe);
        seq_printf(m, " softirq-unsafe locks: %11lu\n",
                        nr_softirq_unsafe);
        seq_printf(m, " irq-safe locks: %11lu\n",
                        nr_irq_safe);
        seq_printf(m, " irq-unsafe locks: %11lu\n",
                        nr_irq_unsafe);

        seq_printf(m, " hardirq-read-safe locks: %11lu\n",
                        nr_hardirq_read_safe);
        seq_printf(m, " hardirq-read-unsafe locks: %11lu\n",
                        nr_hardirq_read_unsafe);
        seq_printf(m, " softirq-read-safe locks: %11lu\n",
                        nr_softirq_read_safe);
        seq_printf(m, " softirq-read-unsafe locks: %11lu\n",
                        nr_softirq_read_unsafe);
        seq_printf(m, " irq-read-safe locks: %11lu\n",
                        nr_irq_read_safe);
        seq_printf(m, " irq-read-unsafe locks: %11lu\n",
                        nr_irq_read_unsafe);

        seq_printf(m, " uncategorized locks: %11lu\n",
                        nr_uncategorized);
        seq_printf(m, " unused locks: %11lu\n",
                        nr_unused);
        seq_printf(m, " max locking depth: %11u\n",
                        max_lockdep_depth);
#ifdef CONFIG_PROVE_LOCKING
        seq_printf(m, " max bfs queue depth: %11u\n",
                        max_bfs_queue_depth);
#endif
        lockdep_stats_debug_show(m);
        seq_printf(m, " debug_locks: %11u\n",
                        debug_locks);

        return 0;
}

static int lockdep_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, lockdep_stats_show, NULL);
}

static const struct file_operations proc_lockdep_stats_operations = {
        .open           = lockdep_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
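
/*
 * /proc/lock_stat: per lock-class contention and hold-time statistics
 * (CONFIG_LOCK_STAT).  open() takes a snapshot of all classes, sorted
 * by contention count, which the ls_* iterator then walks.
 */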

#ifdef CONFIG_LOCK_STAT

struct lock_stat_data {
        struct lock_class *class;
        struct lock_class_stats stats;
};

struct lock_stat_seq {
        struct lock_stat_data *iter_end;
        struct lock_stat_data stats[MAX_LOCKDEP_KEYS];
};

/*
 * sort on absolute number of contentions
 */
static int lock_stat_cmp(const void *l, const void *r)
{
        const struct lock_stat_data *dl = l, *dr = r;
        unsigned long nl, nr;

        nl = dl->stats.read_waittime.nr + dl->stats.write_waittime.nr;
        nr = dr->stats.read_waittime.nr + dr->stats.write_waittime.nr;

        return nr - nl;
}

static void seq_line(struct seq_file *m, char c, int offset, int length)
{
        int i;

        for (i = 0; i < offset; i++)
                seq_puts(m, " ");
        for (i = 0; i < length; i++)
                seq_printf(m, "%c", c);
        seq_puts(m, "\n");
}
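
/*
 * Helpers for pretty-printing lock_time values: snprint_time() divides
 * by 1000 and prints the result with two decimal places.
 */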

static void snprint_time(char *buf, size_t bufsiz, s64 nr)
{
        s64 div;
        s32 rem;

        nr += 5; /* for display rounding */
        div = div_s64_rem(nr, 1000, &rem);
        snprintf(buf, bufsiz, "%lld.%02d", (long long)div, (int)rem/10);
}

static void seq_time(struct seq_file *m, s64 time)
{
        char num[15];

        snprint_time(num, sizeof(num), time);
        seq_printf(m, " %14s", num);
}

static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
{
        seq_printf(m, "%14lu", lt->nr);
        seq_time(m, lt->min);
        seq_time(m, lt->max);
        seq_time(m, lt->total);
        seq_time(m, lt->nr ? div_s64(lt->total, lt->nr) : 0);
}
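
/*
 * Emit the statistics for one lock class: a write line (plus a read
 * line when the class has read acquisitions), followed by its recorded
 * contention-point and contending-point call sites.
 */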

static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
{
        struct lockdep_subclass_key *ckey;
        struct lock_class_stats *stats;
        struct lock_class *class;
        const char *cname;
        int i, namelen;
        char name[39];

        class = data->class;
        stats = &data->stats;

        namelen = 38;
        if (class->name_version > 1)
                namelen -= 2; /* XXX truncates versions > 9 */
        if (class->subclass)
                namelen -= 2;

        rcu_read_lock_sched();
        cname = rcu_dereference_sched(class->name);
        ckey = rcu_dereference_sched(class->key);

        if (!cname && !ckey) {
                rcu_read_unlock_sched();
                return;

        } else if (!cname) {
                char str[KSYM_NAME_LEN];
                const char *key_name;

                key_name = __get_key_name(ckey, str);
                snprintf(name, namelen, "%s", key_name);
        } else {
                snprintf(name, namelen, "%s", cname);
        }
        rcu_read_unlock_sched();

        namelen = strlen(name);
        if (class->name_version > 1) {
                snprintf(name+namelen, 3, "#%d", class->name_version);
                namelen += 2;
        }
        if (class->subclass) {
                snprintf(name+namelen, 3, "/%d", class->subclass);
                namelen += 2;
        }

        if (stats->write_holdtime.nr) {
                if (stats->read_holdtime.nr)
                        seq_printf(m, "%38s-W:", name);
                else
                        seq_printf(m, "%40s:", name);

                seq_printf(m, "%14lu ", stats->bounces[bounce_contended_write]);
                seq_lock_time(m, &stats->write_waittime);
                seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_write]);
                seq_lock_time(m, &stats->write_holdtime);
                seq_puts(m, "\n");
        }

        if (stats->read_holdtime.nr) {
                seq_printf(m, "%38s-R:", name);
                seq_printf(m, "%14lu ", stats->bounces[bounce_contended_read]);
                seq_lock_time(m, &stats->read_waittime);
                seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_read]);
                seq_lock_time(m, &stats->read_holdtime);
                seq_puts(m, "\n");
        }

        if (stats->read_waittime.nr + stats->write_waittime.nr == 0)
                return;

        if (stats->read_holdtime.nr)
                namelen += 2;

        for (i = 0; i < LOCKSTAT_POINTS; i++) {
                char ip[32];

                if (class->contention_point[i] == 0)
                        break;

                if (!i)
                        seq_line(m, '-', 40-namelen, namelen);

                snprintf(ip, sizeof(ip), "[<%p>]",
                                (void *)class->contention_point[i]);
                seq_printf(m, "%40s %14lu %29s %pS\n",
                                name, stats->contention_point[i],
                                ip, (void *)class->contention_point[i]);
        }
        for (i = 0; i < LOCKSTAT_POINTS; i++) {
                char ip[32];

                if (class->contending_point[i] == 0)
                        break;

                if (!i)
                        seq_line(m, '-', 40-namelen, namelen);

                snprintf(ip, sizeof(ip), "[<%p>]",
                                (void *)class->contending_point[i]);
                seq_printf(m, "%40s %14lu %29s %pS\n",
                                name, stats->contending_point[i],
                                ip, (void *)class->contending_point[i]);
        }
        if (i) {
                seq_puts(m, "\n");
                seq_line(m, '.', 0, 40 + 1 + 12 * (14 + 1));
                seq_puts(m, "\n");
        }
}

static void seq_header(struct seq_file *m)
{
        seq_puts(m, "lock_stat version 0.4\n");

        if (unlikely(!debug_locks))
                seq_printf(m, "*WARNING* lock debugging disabled!! - possibly due to a lockdep warning\n");

        seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
        seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s %14s %14s "
                        "%14s %14s\n",
                        "class name",
                        "con-bounces",
                        "contentions",
                        "waittime-min",
                        "waittime-max",
                        "waittime-total",
                        "waittime-avg",
                        "acq-bounces",
                        "acquisitions",
                        "holdtime-min",
                        "holdtime-max",
                        "holdtime-total",
                        "holdtime-avg");
        seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
        seq_printf(m, "\n");
}

static void *ls_start(struct seq_file *m, loff_t *pos)
{
        struct lock_stat_seq *data = m->private;
        struct lock_stat_data *iter;

        if (*pos == 0)
                return SEQ_START_TOKEN;

        iter = data->stats + (*pos - 1);
        if (iter >= data->iter_end)
                iter = NULL;

        return iter;
}

static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
{
        (*pos)++;
        return ls_start(m, pos);
}

static void ls_stop(struct seq_file *m, void *v)
{
}

static int ls_show(struct seq_file *m, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_header(m);
        else
                seq_stats(m, v);

        return 0;
}

static const struct seq_operations lockstat_ops = {
        .start  = ls_start,
        .next   = ls_next,
        .stop   = ls_stop,
        .show   = ls_show,
};
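
/*
 * Opening /proc/lock_stat snapshots the statistics of every lock class
 * into a vmalloc()ed buffer and sorts them by contention count; writing
 * a string starting with '0' to the file clears all statistics.
 */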

static int lock_stat_open(struct inode *inode, struct file *file)
{
        int res;
        struct lock_class *class;
        struct lock_stat_seq *data = vmalloc(sizeof(struct lock_stat_seq));

        if (!data)
                return -ENOMEM;

        res = seq_open(file, &lockstat_ops);
        if (!res) {
                struct lock_stat_data *iter = data->stats;
                struct seq_file *m = file->private_data;

                list_for_each_entry(class, &all_lock_classes, lock_entry) {
                        iter->class = class;
                        iter->stats = lock_stats(class);
                        iter++;
                }
                data->iter_end = iter;

                sort(data->stats, data->iter_end - data->stats,
                                sizeof(struct lock_stat_data),
                                lock_stat_cmp, NULL);

                m->private = data;
        } else
                vfree(data);

        return res;
}

static ssize_t lock_stat_write(struct file *file, const char __user *buf,
                               size_t count, loff_t *ppos)
{
        struct lock_class *class;
        char c;

        if (count) {
                if (get_user(c, buf))
                        return -EFAULT;

                if (c != '0')
                        return count;

                list_for_each_entry(class, &all_lock_classes, lock_entry)
                        clear_lock_stats(class);
        }
        return count;
}

static int lock_stat_release(struct inode *inode, struct file *file)
{
        struct seq_file *seq = file->private_data;

        vfree(seq->private);
        return seq_release(inode, file);
}

static const struct file_operations proc_lock_stat_operations = {
        .open           = lock_stat_open,
        .write          = lock_stat_write,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = lock_stat_release,
};
#endif /* CONFIG_LOCK_STAT */

static int __init lockdep_proc_init(void)
{
        proc_create("lockdep", S_IRUSR, NULL, &proc_lockdep_operations);
#ifdef CONFIG_PROVE_LOCKING
        proc_create("lockdep_chains", S_IRUSR, NULL,
                        &proc_lockdep_chains_operations);
#endif
        proc_create("lockdep_stats", S_IRUSR, NULL,
                        &proc_lockdep_stats_operations);

#ifdef CONFIG_LOCK_STAT
        proc_create("lock_stat", S_IRUSR | S_IWUSR, NULL,
                        &proc_lock_stat_operations);
#endif

        return 0;
}

__initcall(lockdep_proc_init);