// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest although some concurrent
 * insertions into the ring-buffer such as trace_printk could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting it to a
 * serial console.
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" in the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
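
/*
 * A minimal layout sketch of one saved trace_eval_maps array, assuming a
 * module that registered N eval maps; this only restates the comment above:
 *
 *	[0]      head: .mod = owning module (or NULL), .length = N
 *	[1..N]   map:  the individual trace_eval_map entries
 *	[N + 1]  tail: .next = next saved array (or NULL), .end = NULL
 */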

int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_buffer *buffer,
				   unsigned long flags, int pc);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
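
/*
 * For example, ns2usecs(1499) returns 1 and ns2usecs(1500) returns 2:
 * adding 500 before the divide rounds to the nearest microsecond.
 */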

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

/**
 * trace_array_put - Decrement the reference counter for this trace array.
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can be later
 * destroyed.
 *
 */
void trace_array_put(struct trace_array *this_tr)
{
	if (!this_tr)
		return;

	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
EXPORT_SYMBOL_GPL(trace_array_put);

int tracing_check_open_get_tr(struct trace_array *tr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if (tracing_disabled)
		return -ENODEV;

	if (tr && trace_array_get(tr) < 0)
		return -ENODEV;

	return 0;
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct trace_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @filtered_no_pids: The list of pids not to be traced
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids,
		       struct trace_pid_list *filtered_no_pids,
		       struct task_struct *task)
{
	/*
	 * If filtered_no_pids is not empty, and the task's pid is listed
	 * in filtered_no_pids, then return true.
	 * Otherwise, if filtered_pids is empty, that means we can
	 * trace all tasks. If it has content, then only trace pids
	 * within filtered_pids.
	 */

	return (filtered_pids &&
		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
		(filtered_no_pids &&
		trace_find_filtered_pid(filtered_no_pids, task->pid));
}
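
/*
 * A sketch of the resulting policy (both lists may be NULL):
 *
 *	both lists NULL                           -> trace every task
 *	task's pid set in filtered_no_pids        -> do not trace the task
 *	filtered_pids set, pid not found in it    -> do not trace the task
 *	otherwise                                 -> trace the task
 */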

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}
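
/*
 * Illustrative sketch of how the three helpers above are meant to be wired
 * into a seq_file; the names below are hypothetical, the real users are the
 * pid filter files:
 *
 *	static void *p_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(example_pid_list, pos);
 *	}
 *
 *	static void *p_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(example_pid_list, v, pos);
 *	}
 *
 *	static const struct seq_operations example_sops = {
 *		.start	= p_start,
 *		.next	= p_next,
 *		.stop	= p_stop,	// hypothetical, elided here
 *		.show	= trace_pid_show,
 *	};
 */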

/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		trace_parser_put(&parser);
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}
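
/*
 * Example of the all-or-nothing semantics above (a sketch): a write of
 * "1 2 3" builds a fresh bitmap with pids 1, 2 and 3 set and publishes it
 * through *new_pid_list, while a write that parses no pids clears the
 * filter by publishing NULL. The previous list is never modified; it is
 * the caller's job to swap it out and free it.
 */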

static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to system.
 *
 * These primitives allow multi process access to different cpu ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
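
/*
 * A minimal sketch of the expected reader-side pattern, where @cpu is either
 * a specific cpu or RING_BUFFER_ALL_CPUS:
 *
 *	trace_access_lock(cpu);
 *	... peek at or consume events from that cpu's ring buffer ...
 *	trace_access_unlock(cpu);
 */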

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, type, flags, pc);
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}
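
/*
 * Sketch of the usual reserve/fill/commit sequence built on the helper above:
 *
 *	event = __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	... fill in the entry ...
 *	__buffer_unlock_commit(buffer, event);
 */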

void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_on(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);


static __always_inline void
__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.array_buffer.buffer;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event) {
		size = 0;
		goto out;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
 out:
	ring_buffer_nest_end(buffer);
	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
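
/*
 * Callers are not expected to invoke this directly; the trace_puts() wrapper
 * macro is assumed to expand to something like
 * __trace_puts(_THIS_IP_, str, strlen(str)) for non-constant strings.
 */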

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip: The address of the caller
 * @str: The constant string to write to the buffer
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int ret = 0;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.array_buffer.buffer;

	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	ret = 1;
 out:
	ring_buffer_nest_end(buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
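
/*
 * Illustrative sketch (hypothetical code, not part of this file): unlike
 * __trace_puts(), which copies the string bytes into the print_entry,
 * __trace_bputs() stores only the pointer, so the string must be a constant
 * that outlives the trace data (e.g. a string literal).
 */
#if 0	/* example only, not compiled */
static void example_puts_vs_bputs(unsigned long ip)
{
	char stack_buf[16];

	snprintf(stack_buf, sizeof(stack_buf), "id=%d", 42);
	__trace_puts(ip, stack_buf, strlen(stack_buf));	/* OK: bytes are copied */
	__trace_bputs(ip, "constant message\n");	/* OK: literal persists */
}
#endif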

#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
					   void *cond_data)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here! ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id(), cond_data);
	local_irq_restore(flags);
}

void tracing_snapshot_instance(struct trace_array *tr)
{
	tracing_snapshot_instance_cond(tr, NULL);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
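
/*
 * Illustrative sketch (hypothetical code, not part of this file): the
 * intended pattern is to allocate the spare buffer once from a context that
 * can sleep, then trigger snapshots at the points of interest, which may be
 * atomic but must not be NMI context.
 */
#if 0	/* example only, not compiled */
static int __init example_debug_init(void)
{
	/* May sleep: allocates the spare buffer and takes a first snapshot */
	tracing_snapshot_alloc();
	return 0;
}

static void example_hit_condition(void)
{
	/* Safe in atomic context (but not NMI) once the buffer exists */
	tracing_snapshot();
}
#endif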

/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr: The tracing instance to snapshot
 * @cond_data: The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);

/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr: The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	void *cond_data = NULL;

	arch_spin_lock(&tr->max_lock);

	if (tr->cond_snapshot)
		cond_data = tr->cond_snapshot->cond_data;

	arch_spin_unlock(&tr->max_lock);

	return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
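
/*
 * Illustrative sketch (hypothetical code, not part of this file): after a
 * conditional snapshot has been enabled, the data registered with
 * tracing_snapshot_cond_enable() can be read back later, but not from the
 * update() callback itself, since max_lock is already held there. This
 * assumes the enable-time cond_data was a pointer to an unsigned long.
 */
#if 0	/* example only, not compiled */
static void example_report(struct trace_array *tr)
{
	unsigned long *max_seen = tracing_cond_snapshot_data(tr);

	if (max_seen)
		pr_info("conditional snapshot recorded max=%lu\n", *max_seen);
}
#endif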

static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct array_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);

/**
 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 * @tr: The tracing instance
 * @cond_data: User data to associate with the snapshot
 * @update: Implementation of the cond_snapshot update function
 *
 * Check whether the conditional snapshot for the given instance has
 * already been enabled, or if the current tracer is already using a
 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 * save the cond_data and update function inside.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
				 cond_update_fn_t update)
{
	struct cond_snapshot *cond_snapshot;
	int ret = 0;

	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
	if (!cond_snapshot)
		return -ENOMEM;

	cond_snapshot->cond_data = cond_data;
	cond_snapshot->update = update;

	mutex_lock(&trace_types_lock);

	ret = tracing_alloc_snapshot_instance(tr);
	if (ret)
		goto fail_unlock;

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	/*
	 * The cond_snapshot can only change to NULL without the
	 * trace_types_lock. We don't care if we race with it going
	 * to NULL, but we want to make sure that it's not set to
	 * something other than NULL when we get here, which we can
	 * do safely with only holding the trace_types_lock and not
	 * having to take the max_lock.
	 */
	if (tr->cond_snapshot) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	arch_spin_lock(&tr->max_lock);
	tr->cond_snapshot = cond_snapshot;
	arch_spin_unlock(&tr->max_lock);

	mutex_unlock(&trace_types_lock);

	return ret;

 fail_unlock:
	mutex_unlock(&trace_types_lock);
	kfree(cond_snapshot);
	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
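
/*
 * Illustrative sketch (hypothetical code, not part of this file): a user of
 * the conditional snapshot API supplies an update() callback that decides,
 * under tr->max_lock, whether this particular snapshot should happen, for
 * instance only when a new maximum is observed. All names below are
 * hypothetical.
 */
#if 0	/* example only, not compiled */
struct example_state {
	unsigned long current_val;
	unsigned long max_seen;
};

static struct example_state example_state;

/* Called with tr->max_lock held; decides whether the snapshot is taken */
static bool example_update(struct trace_array *tr, void *cond_data)
{
	struct example_state *state = cond_data;

	if (state->current_val <= state->max_seen)
		return false;

	state->max_seen = state->current_val;
	return true;
}

static int example_setup(struct trace_array *tr)
{
	/* Allocates the spare buffer and registers the callback and data */
	return tracing_snapshot_cond_enable(tr, &example_state, example_update);
}

static void example_observe(struct trace_array *tr, unsigned long val)
{
	example_state.current_val = val;
	/* Invokes example_update(); snapshots only when it returns true */
	tracing_snapshot_cond(tr, &example_state);
}
#endif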

/**
 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
 * @tr: The tracing instance
 *
 * Check whether the conditional snapshot for the given instance is
 * enabled; if so, free the cond_snapshot associated with it,
 * otherwise return -EINVAL.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	int ret = 0;

	arch_spin_lock(&tr->max_lock);

	if (!tr->cond_snapshot)
		ret = -EINVAL;
	else {
		kfree(tr->cond_snapshot);
		tr->cond_snapshot = NULL;
	}

	arch_spin_unlock(&tr->max_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
|
tracing: Add internal tracing_snapshot() functions
The new snapshot feature is quite handy. It's a way for the user
to take advantage of the spare buffer that, until then, only
the latency tracers used to "snapshot" the buffer when it hit
a max latency. Now users can trigger a "snapshot" manually when
some condition is hit in a program. But a snapshot currently can
not be triggered by a condition inside the kernel.
With the addition of tracing_snapshot() and tracing_snapshot_alloc(),
snapshots can now be taking when a condition is hit, and the
developer wants to snapshot the case without stopping the trace.
Note, any snapshot will overwrite the old one, so take care
in how this is done.
These new functions are to be used like tracing_on(), tracing_off()
and trace_printk() are. That is, they should never be called
in the mainline Linux kernel. They are solely for the purpose
of debugging.
The tracing_snapshot() will not allocate a buffer, but it is
safe to be called from any context (except NMIs). But if a
snapshot buffer isn't allocated when it is called, it will write
to the live buffer, complaining about the lack of a snapshot
buffer, and then stop tracing (giving you the "permanent snapshot").
tracing_snapshot_alloc() will allocate the snapshot buffer if
it was not already allocated and then take the snapshot. This routine
*may sleep*, and must be called from context that can sleep.
The allocation is done with GFP_KERNEL and not atomic.
If you need a snapshot in an atomic context, say in early boot,
then it is best to call the tracing_snapshot_alloc() before then,
where it will allocate the buffer, and then you can use the
tracing_snapshot() anywhere you want and still get snapshots.
Cc: Hiraku Toyooka <hiraku.toyooka.gu@hitachi.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2013-03-07 02:45:37 +00:00
|
|
|
#else
|
|
|
|
void tracing_snapshot(void)
|
|
|
|
{
|
|
|
|
WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
|
|
|
|
}
|
2013-03-09 05:56:08 +00:00
|
|
|
EXPORT_SYMBOL_GPL(tracing_snapshot);
|
tracing: Add conditional snapshot
Currently, tracing snapshots are context-free - they capture the ring
buffer contents at the time the tracing_snapshot() function was
invoked, and nothing else. Additionally, they're always taken
unconditionally - the calling code can decide whether or not to take a
snapshot, but the data used to make that decision is kept separately
from the snapshot itself.
This change adds the ability to associate with each trace instance
some user data, along with an 'update' function that can use that data
to determine whether or not to actually take a snapshot. The update
function can then update that data along with any other state (as part
of the data presumably), if warranted.
Because snapshots are 'global' per-instance, only one user can enable
and use a conditional snapshot for any given trace instance. To
enable a conditional snapshot (see details in the function and data
structure comments), the user calls tracing_snapshot_cond_enable().
Similarly, to disable a conditional snapshot and free it up for other
users, tracing_snapshot_cond_disable() should be called.
To actually initiate a conditional snapshot, tracing_snapshot_cond()
should be called. tracing_snapshot_cond() will invoke the update()
callback, allowing the user to decide whether or not to actually take
the snapshot and update the user-defined data associated with the
snapshot. If the callback returns 'true', tracing_snapshot_cond()
will then actually take the snapshot and return.
This scheme allows for flexibility in snapshot implementations - for
example, by implementing slightly different update() callbacks,
snapshots can be taken in situations where the user is only interested
in taking a snapshot when a new maximum in hit versus when a value
changes in any way at all. Future patches will demonstrate both
cases.
Link: http://lkml.kernel.org/r/1bea07828d5fd6864a585f83b1eed47ce097eb45.1550100284.git.tom.zanussi@linux.intel.com
Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
2019-02-13 23:42:45 +00:00
|
|
|
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
|
|
|
|
{
|
|
|
|
WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
|
2013-10-24 13:59:26 +00:00
|
|
|
int tracing_alloc_snapshot(void)
|
|
|
|
{
|
|
|
|
WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
|
tracing: Add internal tracing_snapshot() functions
The new snapshot feature is quite handy. It's a way for the user
to take advantage of the spare buffer that, until then, only
the latency tracers used to "snapshot" the buffer when it hit
a max latency. Now users can trigger a "snapshot" manually when
some condition is hit in a program. But a snapshot currently can
not be triggered by a condition inside the kernel.
With the addition of tracing_snapshot() and tracing_snapshot_alloc(),
snapshots can now be taking when a condition is hit, and the
developer wants to snapshot the case without stopping the trace.
Note, any snapshot will overwrite the old one, so take care
in how this is done.
These new functions are to be used like tracing_on(), tracing_off()
and trace_printk() are. That is, they should never be called
in the mainline Linux kernel. They are solely for the purpose
of debugging.
The tracing_snapshot() will not allocate a buffer, but it is
safe to be called from any context (except NMIs). But if a
snapshot buffer isn't allocated when it is called, it will write
to the live buffer, complaining about the lack of a snapshot
buffer, and then stop tracing (giving you the "permanent snapshot").
tracing_snapshot_alloc() will allocate the snapshot buffer if
it was not already allocated and then take the snapshot. This routine
*may sleep*, and must be called from context that can sleep.
The allocation is done with GFP_KERNEL and not atomic.
If you need a snapshot in an atomic context, say in early boot,
then it is best to call the tracing_snapshot_alloc() before then,
where it will allocate the buffer, and then you can use the
tracing_snapshot() anywhere you want and still get snapshots.
Cc: Hiraku Toyooka <hiraku.toyooka.gu@hitachi.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2013-03-07 02:45:37 +00:00
|
|
|
void tracing_snapshot_alloc(void)
|
|
|
|
{
|
|
|
|
/* Give warning */
|
|
|
|
tracing_snapshot();
|
|
|
|
}
|
2013-03-09 05:56:08 +00:00
|
|
|
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
|
tracing: Add conditional snapshot
Currently, tracing snapshots are context-free - they capture the ring
buffer contents at the time the tracing_snapshot() function was
invoked, and nothing else. Additionally, they're always taken
unconditionally - the calling code can decide whether or not to take a
snapshot, but the data used to make that decision is kept separately
from the snapshot itself.
This change adds the ability to associate with each trace instance
some user data, along with an 'update' function that can use that data
to determine whether or not to actually take a snapshot. The update
function can then update that data along with any other state (as part
of the data presumably), if warranted.
Because snapshots are 'global' per-instance, only one user can enable
and use a conditional snapshot for any given trace instance. To
enable a conditional snapshot (see details in the function and data
structure comments), the user calls tracing_snapshot_cond_enable().
Similarly, to disable a conditional snapshot and free it up for other
users, tracing_snapshot_cond_disable() should be called.
To actually initiate a conditional snapshot, tracing_snapshot_cond()
should be called. tracing_snapshot_cond() will invoke the update()
callback, allowing the user to decide whether or not to actually take
the snapshot and update the user-defined data associated with the
snapshot. If the callback returns 'true', tracing_snapshot_cond()
will then actually take the snapshot and return.
This scheme allows for flexibility in snapshot implementations - for
example, by implementing slightly different update() callbacks,
snapshots can be taken in situations where the user is only interested
in taking a snapshot when a new maximum in hit versus when a value
changes in any way at all. Future patches will demonstrate both
cases.
Link: http://lkml.kernel.org/r/1bea07828d5fd6864a585f83b1eed47ce097eb45.1550100284.git.tom.zanussi@linux.intel.com
Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
2019-02-13 23:42:45 +00:00
|
|
|
void *tracing_cond_snapshot_data(struct trace_array *tr)
|
|
|
|
{
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
|
|
|
|
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
|
|
|
|
{
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
|
|
|
|
int tracing_snapshot_cond_disable(struct trace_array *tr)
|
|
|
|
{
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
|
tracing: Add internal tracing_snapshot() functions
The new snapshot feature is quite handy. It's a way for the user
to take advantage of the spare buffer that, until then, only
the latency tracers used to "snapshot" the buffer when it hit
a max latency. Now users can trigger a "snapshot" manually when
some condition is hit in a program. But a snapshot currently can
not be triggered by a condition inside the kernel.
With the addition of tracing_snapshot() and tracing_snapshot_alloc(),
snapshots can now be taking when a condition is hit, and the
developer wants to snapshot the case without stopping the trace.
Note, any snapshot will overwrite the old one, so take care
in how this is done.
These new functions are to be used like tracing_on(), tracing_off()
and trace_printk() are. That is, they should never be called
in the mainline Linux kernel. They are solely for the purpose
of debugging.
The tracing_snapshot() will not allocate a buffer, but it is
safe to be called from any context (except NMIs). But if a
snapshot buffer isn't allocated when it is called, it will write
to the live buffer, complaining about the lack of a snapshot
buffer, and then stop tracing (giving you the "permanent snapshot").
tracing_snapshot_alloc() will allocate the snapshot buffer if
it was not already allocated and then take the snapshot. This routine
*may sleep*, and must be called from context that can sleep.
The allocation is done with GFP_KERNEL and not atomic.
If you need a snapshot in an atomic context, say in early boot,
then it is best to call tracing_snapshot_alloc() beforehand,
where it will allocate the buffer, and then you can use
tracing_snapshot() anywhere you want and still get snapshots.
Cc: Hiraku Toyooka <hiraku.toyooka.gu@hitachi.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2013-03-07 02:45:37 +00:00
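As a hypothetical sketch of how these two calls are meant to be paired
(my_feature_init(), my_feature_check() and bad_state() are made up; only
tracing_snapshot_alloc() and tracing_snapshot() are the functions described
above):

/* Hedged sketch: allocate early where sleeping is allowed, snapshot later. */
static int __init my_feature_init(void)
{
	/* Sleeping context: allocate the snapshot buffer up front (GFP_KERNEL). */
	tracing_snapshot_alloc();
	return 0;
}

static void my_feature_check(void)
{
	/* Safe from atomic context (not NMI); a later snapshot overwrites this one. */
	if (bad_state())
		tracing_snapshot();
}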
|
|
|
#endif /* CONFIG_TRACER_SNAPSHOT */
|
|
|
|
|
2017-04-20 15:46:03 +00:00
|
|
|
void tracer_tracing_off(struct trace_array *tr)
|
2013-07-01 19:58:24 +00:00
|
|
|
{
|
2020-01-09 23:53:48 +00:00
|
|
|
if (tr->array_buffer.buffer)
|
|
|
|
ring_buffer_record_off(tr->array_buffer.buffer);
|
2013-07-01 19:58:24 +00:00
|
|
|
/*
|
|
|
|
* This flag is looked at when buffers haven't been allocated
|
|
|
|
* yet, or by some tracers (like irqsoff), that just want to
|
|
|
|
* know if the ring buffer has been disabled, but it can handle
|
|
|
|
* races where it gets disabled but we still do a record.
|
|
|
|
* As the check is in the fast path of the tracers, it is more
|
|
|
|
* important to be fast than accurate.
|
|
|
|
*/
|
|
|
|
tr->buffer_disabled = 1;
|
|
|
|
/* Make the flag seen by readers */
|
|
|
|
smp_wmb();
|
|
|
|
}
|
|
|
|
|
2012-02-22 20:50:28 +00:00
|
|
|
/**
|
|
|
|
* tracing_off - turn off tracing buffers
|
|
|
|
*
|
|
|
|
* This function stops the tracing buffers from recording data.
|
|
|
|
* It does not disable any overhead the tracers themselves may
|
|
|
|
* be causing. This function simply causes all recording to
|
|
|
|
* the ring buffers to fail.
|
|
|
|
*/
|
|
|
|
void tracing_off(void)
|
|
|
|
{
|
2013-07-01 19:58:24 +00:00
|
|
|
tracer_tracing_off(&global_trace);
|
2012-02-22 20:50:28 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(tracing_off);
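A common (hypothetical) debugging pattern built on top of this is to freeze the
buffers the moment a suspicious state is seen, so the events leading up to it
stay in the ring buffer for later reading; looks_corrupted() below is a made-up
check and the whole snippet is debug-only, not something for mainline code.

	/* Debug-only sketch: preserve the interesting history by stopping recording. */
	if (looks_corrupted(obj)) {
		trace_printk("bad state in %px, stopping trace\n", obj);
		tracing_off();
	}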
|
|
|
|
|
2013-06-14 20:21:43 +00:00
|
|
|
void disable_trace_on_warning(void)
|
|
|
|
{
|
2020-05-29 14:46:32 +00:00
|
|
|
if (__disable_trace_on_warning) {
|
|
|
|
trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
|
|
|
|
"Disabling tracing due to warning\n");
|
2013-06-14 20:21:43 +00:00
|
|
|
tracing_off();
|
2020-05-29 14:46:32 +00:00
|
|
|
}
|
2013-06-14 20:21:43 +00:00
|
|
|
}
|
|
|
|
|
2013-07-01 19:58:24 +00:00
|
|
|
/**
|
|
|
|
* tracer_tracing_is_on - show real state of ring buffer enabled
|
|
|
|
* @tr: the trace array to check whether the ring buffer is enabled
|
|
|
|
*
|
|
|
|
* Shows the real state of the ring buffer, whether it is enabled or not.
|
|
|
|
*/
|
2018-08-01 20:08:57 +00:00
|
|
|
bool tracer_tracing_is_on(struct trace_array *tr)
|
2013-07-01 19:58:24 +00:00
|
|
|
{
|
2020-01-09 23:53:48 +00:00
|
|
|
if (tr->array_buffer.buffer)
|
|
|
|
return ring_buffer_record_is_on(tr->array_buffer.buffer);
|
2013-07-01 19:58:24 +00:00
|
|
|
return !tr->buffer_disabled;
|
|
|
|
}
|
|
|
|
|
2012-02-22 20:50:28 +00:00
|
|
|
/**
|
|
|
|
* tracing_is_on - show state of ring buffers enabled
|
|
|
|
*/
|
|
|
|
int tracing_is_on(void)
|
|
|
|
{
|
2013-07-01 19:58:24 +00:00
|
|
|
return tracer_tracing_is_on(&global_trace);
|
2012-02-22 20:50:28 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(tracing_is_on);
|
|
|
|
|
2008-09-30 03:02:41 +00:00
|
|
|
static int __init set_buf_size(char *str)
|
2008-05-12 19:20:42 +00:00
|
|
|
{
|
2008-09-30 03:02:41 +00:00
|
|
|
unsigned long buf_size;
|
2008-05-12 19:21:00 +00:00
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
if (!str)
|
|
|
|
return 0;
|
2009-06-24 09:33:15 +00:00
|
|
|
buf_size = memparse(str, &str);
|
2008-05-12 19:21:00 +00:00
|
|
|
/* nr_entries can not be zero */
|
2009-06-24 09:33:15 +00:00
|
|
|
if (buf_size == 0)
|
2008-05-12 19:21:00 +00:00
|
|
|
return 0;
|
2008-09-30 03:02:41 +00:00
|
|
|
trace_buf_size = buf_size;
|
2008-05-12 19:20:42 +00:00
|
|
|
return 1;
|
|
|
|
}
|
2008-09-30 03:02:41 +00:00
|
|
|
__setup("trace_buf_size=", set_buf_size);
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2010-02-25 23:36:43 +00:00
|
|
|
static int __init set_tracing_thresh(char *str)
|
|
|
|
{
|
2012-08-02 06:02:00 +00:00
|
|
|
unsigned long threshold;
|
2010-02-25 23:36:43 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!str)
|
|
|
|
return 0;
|
2012-09-26 20:08:38 +00:00
|
|
|
ret = kstrtoul(str, 0, &threshold);
|
2010-02-25 23:36:43 +00:00
|
|
|
if (ret < 0)
|
|
|
|
return 0;
|
2012-08-02 06:02:00 +00:00
|
|
|
tracing_thresh = threshold * 1000;
|
2010-02-25 23:36:43 +00:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
__setup("tracing_thresh=", set_tracing_thresh);
|
|
|
|
|
2008-05-12 19:20:44 +00:00
|
|
|
unsigned long nsecs_to_usecs(unsigned long nsecs)
|
|
|
|
{
|
|
|
|
return nsecs / 1000;
|
|
|
|
}
|
|
|
|
|
tracing: Use TRACE_FLAGS macro to keep enums and strings matched
Use a cute little macro trick to keep the names of the trace flags file
guaranteed to match the corresponding masks.
The macro TRACE_FLAGS is defined as a series of enum names followed by
the string name of the file that matches it. For example:
#define TRACE_FLAGS \
C(PRINT_PARENT, "print-parent"), \
C(SYM_OFFSET, "sym-offset"), \
C(SYM_ADDR, "sym-addr"), \
C(VERBOSE, "verbose"),
Now we can define the following:
#undef C
#define C(a, b) TRACE_ITER_##a##_BIT
enum trace_iterator_bits { TRACE_FLAGS };
The above creates:
enum trace_iterator_bits {
TRACE_ITER_PRINT_PARENT_BIT,
TRACE_ITER_SYM_OFFSET_BIT,
TRACE_ITER_SYM_ADDR_BIT,
TRACE_ITER_VERBOSE_BIT,
};
Then we can redefine C as:
#undef C
#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)
enum trace_iterator_flags { TRACE_FLAGS };
Which creates:
enum trace_iterator_flags {
TRACE_ITER_PRINT_PARENT = (1 << TRACE_ITER_PRINT_PARENT_BIT),
TRACE_ITER_SYM_OFFSET = (1 << TRACE_ITER_SYM_OFFSET_BIT),
TRACE_ITER_SYM_ADDR = (1 << TRACE_ITER_SYM_ADDR_BIT),
TRACE_ITER_VERBOSE = (1 << TRACE_ITER_VERBOSE_BIT),
};
Then finally we can create the list of file names:
#undef C
#define C(a, b) b
static const char *trace_options[] = {
TRACE_FLAGS
NULL
};
Which creates:
static const char *trace_options[] = {
"print-parent",
"sym-offset",
"sym-addr",
"verbose",
NULL
};
The importance of this is that the strings match the bit index.
trace_options[TRACE_ITER_SYM_ADDR_BIT] == "sym-addr"
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2015-09-29 13:43:30 +00:00
|
|
|
/*
|
|
|
|
* TRACE_FLAGS is defined as a tuple matching bit masks with strings.
|
2017-05-31 21:56:48 +00:00
|
|
|
* It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
|
tracing: Use TRACE_FLAGS macro to keep enums and strings matched
2015-09-29 13:43:30 +00:00
|
|
|
* matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
|
2017-05-31 21:56:48 +00:00
|
|
|
* of strings in the order that the evals (enum) were defined.
|
tracing: Use TRACE_FLAGS macro to keep enums and strings matched
2015-09-29 13:43:30 +00:00
|
|
|
*/
|
|
|
|
#undef C
|
|
|
|
#define C(a, b) b
|
|
|
|
|
2008-05-12 19:21:00 +00:00
|
|
|
/* These must match the bit positions in trace_iterator_flags */
|
2008-05-12 19:20:42 +00:00
|
|
|
static const char *trace_options[] = {
|
tracing: Use TRACE_FLAGS macro to keep enums and strings matched
2015-09-29 13:43:30 +00:00
|
|
|
TRACE_FLAGS
|
2008-05-12 19:20:42 +00:00
|
|
|
NULL
|
|
|
|
};
|
|
|
|
|
2009-08-25 08:12:56 +00:00
|
|
|
static struct {
|
|
|
|
u64 (*func)(void);
|
|
|
|
const char *name;
|
2012-11-13 20:18:22 +00:00
|
|
|
int in_ns; /* is this clock in nanoseconds? */
|
2009-08-25 08:12:56 +00:00
|
|
|
} trace_clocks[] = {
|
2014-07-16 21:05:25 +00:00
|
|
|
{ trace_clock_local, "local", 1 },
|
|
|
|
{ trace_clock_global, "global", 1 },
|
|
|
|
{ trace_clock_counter, "counter", 0 },
|
2014-08-06 00:46:42 +00:00
|
|
|
{ trace_clock_jiffies, "uptime", 0 },
|
2014-07-16 21:05:25 +00:00
|
|
|
{ trace_clock, "perf", 1 },
|
|
|
|
{ ktime_get_mono_fast_ns, "mono", 1 },
|
2015-05-08 14:30:39 +00:00
|
|
|
{ ktime_get_raw_fast_ns, "mono_raw", 1 },
|
2018-04-25 13:33:38 +00:00
|
|
|
{ ktime_get_boot_fast_ns, "boot", 1 },
|
2012-11-13 20:18:21 +00:00
|
|
|
ARCH_TRACE_CLOCKS
|
2009-08-25 08:12:56 +00:00
|
|
|
};
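The "name" strings in this table are what show up in the tracefs trace_clock
file, and a clock is selected by writing one of them back; assuming the usual
tracefs mount point, something like:

	# cat /sys/kernel/tracing/trace_clock
	[local] global counter uptime perf mono mono_raw boot
	# echo mono > /sys/kernel/tracing/trace_clock

(the exact list depends on ARCH_TRACE_CLOCKS, e.g. x86 also offers x86-tsc).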
|
|
|
|
|
2018-01-16 02:51:48 +00:00
|
|
|
bool trace_clock_in_ns(struct trace_array *tr)
|
|
|
|
{
|
|
|
|
if (trace_clocks[tr->clock_id].in_ns)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2009-09-11 15:29:27 +00:00
|
|
|
/*
|
|
|
|
* trace_parser_get_init - gets the buffer for trace parser
|
|
|
|
*/
|
|
|
|
int trace_parser_get_init(struct trace_parser *parser, int size)
|
|
|
|
{
|
|
|
|
memset(parser, 0, sizeof(*parser));
|
|
|
|
|
|
|
|
parser->buffer = kmalloc(size, GFP_KERNEL);
|
|
|
|
if (!parser->buffer)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
parser->size = size;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* trace_parser_put - frees the buffer for trace parser
|
|
|
|
*/
|
|
|
|
void trace_parser_put(struct trace_parser *parser)
|
|
|
|
{
|
|
|
|
kfree(parser->buffer);
|
2017-02-02 22:58:18 +00:00
|
|
|
parser->buffer = NULL;
|
2009-09-11 15:29:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* trace_get_user - reads the user input string separated by space
|
|
|
|
* (matched by isspace(ch))
|
|
|
|
*
|
|
|
|
* For each string found the 'struct trace_parser' is updated,
|
|
|
|
* and the function returns.
|
|
|
|
*
|
|
|
|
* Returns number of bytes read.
|
|
|
|
*
|
|
|
|
* See kernel/trace/trace.h for 'struct trace_parser' details.
|
|
|
|
*/
|
|
|
|
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
|
|
|
|
size_t cnt, loff_t *ppos)
|
|
|
|
{
|
|
|
|
char ch;
|
|
|
|
size_t read = 0;
|
|
|
|
ssize_t ret;
|
|
|
|
|
|
|
|
if (!*ppos)
|
|
|
|
trace_parser_clear(parser);
|
|
|
|
|
|
|
|
ret = get_user(ch, ubuf++);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
read++;
|
|
|
|
cnt--;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The parser is not finished with the last write,
|
|
|
|
* continue reading the user input without skipping spaces.
|
|
|
|
*/
|
|
|
|
if (!parser->cont) {
|
|
|
|
/* skip white space */
|
|
|
|
while (cnt && isspace(ch)) {
|
|
|
|
ret = get_user(ch, ubuf++);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
read++;
|
|
|
|
cnt--;
|
|
|
|
}
|
|
|
|
|
2018-01-16 09:02:29 +00:00
|
|
|
parser->idx = 0;
|
|
|
|
|
2009-09-11 15:29:27 +00:00
|
|
|
/* only spaces were written */
|
2018-01-16 09:02:28 +00:00
|
|
|
if (isspace(ch) || !ch) {
|
2009-09-11 15:29:27 +00:00
|
|
|
*ppos += read;
|
|
|
|
ret = read;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* read the non-space input */
|
2018-01-16 09:02:28 +00:00
|
|
|
while (cnt && !isspace(ch) && ch) {
|
2009-09-22 05:51:54 +00:00
|
|
|
if (parser->idx < parser->size - 1)
|
2009-09-11 15:29:27 +00:00
|
|
|
parser->buffer[parser->idx++] = ch;
|
|
|
|
else {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
ret = get_user(ch, ubuf++);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
read++;
|
|
|
|
cnt--;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We either got finished input or we have to wait for another call. */
|
2018-01-16 09:02:28 +00:00
|
|
|
if (isspace(ch) || !ch) {
|
2009-09-11 15:29:27 +00:00
|
|
|
parser->buffer[parser->idx] = 0;
|
|
|
|
parser->cont = false;
|
2013-10-10 02:23:23 +00:00
|
|
|
} else if (parser->idx < parser->size - 1) {
|
2009-09-11 15:29:27 +00:00
|
|
|
parser->cont = true;
|
|
|
|
parser->buffer[parser->idx++] = ch;
|
2018-01-16 09:02:30 +00:00
|
|
|
/* Make sure the parsed string always terminates with '\0'. */
|
|
|
|
parser->buffer[parser->idx] = 0;
|
2013-10-10 02:23:23 +00:00
|
|
|
} else {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
2009-09-11 15:29:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
*ppos += read;
|
|
|
|
ret = read;
|
|
|
|
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
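A (hypothetical) caller typically wraps these helpers in a file's .write
handler, letting trace_get_user() hand back one whitespace-separated word per
call; my_write() and my_handle_word() below are made-up names, while
trace_parser_get_init(), trace_get_user(), trace_parser_loaded() and
trace_parser_put() are the helpers above.

static ssize_t my_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret = 0;

	if (trace_parser_get_init(&parser, PAGE_SIZE))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);
	if (read >= 0 && trace_parser_loaded(&parser))
		/* parser.buffer now holds one NUL-terminated word */
		ret = my_handle_word(parser.buffer);

	trace_parser_put(&parser);
	return ret ? ret : read;
}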
|
|
|
|
|
2014-06-25 19:54:42 +00:00
|
|
|
/* TODO add a seq_buf_to_buffer() */
|
2009-03-22 17:11:11 +00:00
|
|
|
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
|
2009-02-09 06:15:56 +00:00
|
|
|
{
|
|
|
|
int len;
|
|
|
|
|
2014-11-14 20:49:41 +00:00
|
|
|
if (trace_seq_used(s) <= s->seq.readpos)
|
2009-02-09 06:15:56 +00:00
|
|
|
return -EBUSY;
|
|
|
|
|
2014-11-14 20:49:41 +00:00
|
|
|
len = trace_seq_used(s) - s->seq.readpos;
|
2009-02-09 06:15:56 +00:00
|
|
|
if (cnt > len)
|
|
|
|
cnt = len;
|
2014-06-25 19:54:42 +00:00
|
|
|
memcpy(buf, s->buffer + s->seq.readpos, cnt);
|
2009-02-09 06:15:56 +00:00
|
|
|
|
2014-06-25 19:54:42 +00:00
|
|
|
s->seq.readpos += cnt;
|
2009-02-09 06:15:56 +00:00
|
|
|
return cnt;
|
|
|
|
}
|
|
|
|
|
2010-02-25 23:36:43 +00:00
|
|
|
unsigned long __read_mostly tracing_thresh;
|
2019-10-08 22:08:21 +00:00
|
|
|
static const struct file_operations tracing_max_lat_fops;
|
|
|
|
|
|
|
|
#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
|
|
|
|
defined(CONFIG_FSNOTIFY)
|
|
|
|
|
|
|
|
static struct workqueue_struct *fsnotify_wq;
|
|
|
|
|
|
|
|
static void latency_fsnotify_workfn(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct trace_array *tr = container_of(work, struct trace_array,
|
|
|
|
fsnotify_work);
|
2020-07-22 12:58:44 +00:00
|
|
|
fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
|
2019-10-08 22:08:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
|
|
|
|
{
|
|
|
|
struct trace_array *tr = container_of(iwork, struct trace_array,
|
|
|
|
fsnotify_irqwork);
|
|
|
|
queue_work(fsnotify_wq, &tr->fsnotify_work);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void trace_create_maxlat_file(struct trace_array *tr,
|
|
|
|
struct dentry *d_tracer)
|
|
|
|
{
|
|
|
|
INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
|
|
|
|
init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
|
|
|
|
tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
|
|
|
|
d_tracer, &tr->max_latency,
|
|
|
|
&tracing_max_lat_fops);
|
|
|
|
}
|
|
|
|
|
|
|
|
__init static int latency_fsnotify_init(void)
|
|
|
|
{
|
|
|
|
fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
|
|
|
|
WQ_UNBOUND | WQ_HIGHPRI, 0);
|
|
|
|
if (!fsnotify_wq) {
|
|
|
|
pr_err("Unable to allocate tr_max_lat_wq\n");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
late_initcall_sync(latency_fsnotify_init);
|
|
|
|
|
|
|
|
void latency_fsnotify(struct trace_array *tr)
|
|
|
|
{
|
|
|
|
if (!fsnotify_wq)
|
|
|
|
return;
|
|
|
|
/*
|
|
|
|
* We cannot call queue_work(&tr->fsnotify_work) from here because it's
|
|
|
|
* possible that we are called from __schedule() or do_idle(), which
|
|
|
|
* could cause a deadlock.
|
|
|
|
*/
|
|
|
|
irq_work_queue(&tr->fsnotify_irqwork);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
|
|
|
|
* defined(CONFIG_FSNOTIFY)
|
|
|
|
*/
|
|
|
|
#else
|
|
|
|
|
|
|
|
#define trace_create_maxlat_file(tr, d_tracer) \
|
|
|
|
trace_create_file("tracing_max_latency", 0644, d_tracer, \
|
|
|
|
&tr->max_latency, &tracing_max_lat_fops)
|
|
|
|
|
|
|
|
#endif
|
2010-02-25 23:36:43 +00:00
|
|
|
|
2009-08-27 20:52:21 +00:00
|
|
|
#ifdef CONFIG_TRACER_MAX_TRACE
|
|
|
|
/*
|
|
|
|
* Copy the new maximum trace into the separate maximum-trace
|
|
|
|
* structure. (this way the maximum trace is permanently saved,
|
2017-10-19 06:32:33 +00:00
|
|
|
* for later retrieval via /sys/kernel/tracing/tracing_max_latency)
|
2009-08-27 20:52:21 +00:00
|
|
|
*/
|
|
|
|
static void
|
|
|
|
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
|
|
|
|
{
|
2020-01-09 23:53:48 +00:00
|
|
|
struct array_buffer *trace_buf = &tr->array_buffer;
|
|
|
|
struct array_buffer *max_buf = &tr->max_buffer;
|
tracing: Consolidate max_tr into main trace_array structure
Currently, the way the latency tracers and snapshot feature works
is to have a separate trace_array called "max_tr" that holds the
snapshot buffer. For latency tracers, this snapshot buffer is used
to swap the running buffer with this buffer to save the current max
latency.
The only items needed for the max_tr is really just a copy of the buffer
itself, the per_cpu data pointers, the time_start timestamp that states
when the max latency was triggered, and the cpu that the max latency
was triggered on. All other fields in trace_array are unused by the
max_tr, making the max_tr mostly bloat.
This change removes the max_tr completely, and adds a new structure
called trace_buffer, that holds the buffer pointer, the per_cpu data
pointers, the time_start timestamp, and the cpu where the latency occurred.
The trace_array, now has two trace_buffers, one for the normal trace and
one for the max trace or snapshot. By doing this, not only do we remove
the bloat from the max_trace but the instances of traces can now use
their own snapshot feature and not have just the top level global_trace have
the snapshot feature and latency tracers for itself.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2013-03-05 14:24:35 +00:00
|
|
|
struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
|
|
|
|
struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
|
2009-08-27 20:52:21 +00:00
|
|
|
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
max_buf->cpu = cpu;
|
|
|
|
max_buf->time_start = data->preempt_timestamp;
|
2009-08-27 20:52:21 +00:00
|
|
|
|
2014-01-14 16:28:38 +00:00
|
|
|
max_data->saved_latency = tr->max_latency;
|
2009-09-02 16:27:41 +00:00
|
|
|
max_data->critical_start = data->critical_start;
|
|
|
|
max_data->critical_end = data->critical_end;
|
2009-08-27 20:52:21 +00:00
|
|
|
|
2019-03-05 16:12:00 +00:00
|
|
|
strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
|
2009-09-02 16:27:41 +00:00
|
|
|
max_data->pid = tsk->pid;
|
tracing: Use current_uid() for critical time tracing
The irqsoff tracer records the max time that interrupts are disabled.
There are hooks in the assembly code that calls back into the tracer when
interrupts are disabled or enabled.
When they are enabled, the tracer checks if the amount of time they
were disabled is larger than the previous recorded max interrupts off
time. If it is, it creates a snapshot of the currently running trace
to store where the last largest interrupts off time was held and how
it happened.
During testing, this RCU lockdep dump appeared:
[ 1257.829021] ===============================
[ 1257.829021] [ INFO: suspicious RCU usage. ]
[ 1257.829021] 3.10.0-rc1-test+ #171 Tainted: G W
[ 1257.829021] -------------------------------
[ 1257.829021] /home/rostedt/work/git/linux-trace.git/include/linux/rcupdate.h:780 rcu_read_lock() used illegally while idle!
[ 1257.829021]
[ 1257.829021] other info that might help us debug this:
[ 1257.829021]
[ 1257.829021]
[ 1257.829021] RCU used illegally from idle CPU!
[ 1257.829021] rcu_scheduler_active = 1, debug_locks = 0
[ 1257.829021] RCU used illegally from extended quiescent state!
[ 1257.829021] 2 locks held by trace-cmd/4831:
[ 1257.829021] #0: (max_trace_lock){......}, at: [<ffffffff810e2b77>] stop_critical_timing+0x1a3/0x209
[ 1257.829021] #1: (rcu_read_lock){.+.+..}, at: [<ffffffff810dae5a>] __update_max_tr+0x88/0x1ee
[ 1257.829021]
[ 1257.829021] stack backtrace:
[ 1257.829021] CPU: 3 PID: 4831 Comm: trace-cmd Tainted: G W 3.10.0-rc1-test+ #171
[ 1257.829021] Hardware name: To Be Filled By O.E.M. To Be Filled By O.E.M./To be filled by O.E.M., BIOS SDBLI944.86P 05/08/2007
[ 1257.829021] 0000000000000001 ffff880065f49da8 ffffffff8153dd2b ffff880065f49dd8
[ 1257.829021] ffffffff81092a00 ffff88006bd78680 ffff88007add7500 0000000000000003
[ 1257.829021] ffff88006bd78680 ffff880065f49e18 ffffffff810daebf ffffffff810dae5a
[ 1257.829021] Call Trace:
[ 1257.829021] [<ffffffff8153dd2b>] dump_stack+0x19/0x1b
[ 1257.829021] [<ffffffff81092a00>] lockdep_rcu_suspicious+0x109/0x112
[ 1257.829021] [<ffffffff810daebf>] __update_max_tr+0xed/0x1ee
[ 1257.829021] [<ffffffff810dae5a>] ? __update_max_tr+0x88/0x1ee
[ 1257.829021] [<ffffffff811002b9>] ? user_enter+0xfd/0x107
[ 1257.829021] [<ffffffff810dbf85>] update_max_tr_single+0x11d/0x12d
[ 1257.829021] [<ffffffff811002b9>] ? user_enter+0xfd/0x107
[ 1257.829021] [<ffffffff810e2b15>] stop_critical_timing+0x141/0x209
[ 1257.829021] [<ffffffff8109569a>] ? trace_hardirqs_on+0xd/0xf
[ 1257.829021] [<ffffffff811002b9>] ? user_enter+0xfd/0x107
[ 1257.829021] [<ffffffff810e3057>] time_hardirqs_on+0x2a/0x2f
[ 1257.829021] [<ffffffff811002b9>] ? user_enter+0xfd/0x107
[ 1257.829021] [<ffffffff8109550c>] trace_hardirqs_on_caller+0x16/0x197
[ 1257.829021] [<ffffffff8109569a>] trace_hardirqs_on+0xd/0xf
[ 1257.829021] [<ffffffff811002b9>] user_enter+0xfd/0x107
[ 1257.829021] [<ffffffff810029b4>] do_notify_resume+0x92/0x97
[ 1257.829021] [<ffffffff8154bdca>] int_signal+0x12/0x17
What happened was entering into the user code, the interrupts were enabled
and a max interrupts off was recorded. The trace buffer was saved along with
various information about the task: comm, pid, uid, priority, etc.
The uid is recorded with task_uid(tsk). But this is a macro that uses rcu_read_lock()
to retrieve the data, and this happened to happen where RCU is blind (user_enter).
As only the preempt and irqs off tracers can have this happen, and they both
only have the tsk == current, if tsk == current, use current_uid() instead of
task_uid(), as current_uid() does not use RCU as only current can change its uid.
This fixes the RCU suspicious splat.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2013-05-31 01:10:37 +00:00
|
|
|
/*
|
|
|
|
* If tsk == current, then use current_uid(), as that does not use
|
|
|
|
* RCU. The irq tracer can be called out of RCU scope.
|
|
|
|
*/
|
|
|
|
if (tsk == current)
|
|
|
|
max_data->uid = current_uid();
|
|
|
|
else
|
|
|
|
max_data->uid = task_uid(tsk);
|
|
|
|
|
2009-09-02 16:27:41 +00:00
|
|
|
max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
|
|
|
|
max_data->policy = tsk->policy;
|
|
|
|
max_data->rt_priority = tsk->rt_priority;
|
2009-08-27 20:52:21 +00:00
|
|
|
|
|
|
|
/* record this tasks comm */
|
|
|
|
tracing_record_cmdline(tsk);
|
2019-10-08 22:08:21 +00:00
|
|
|
latency_fsnotify(tr);
|
2009-08-27 20:52:21 +00:00
|
|
|
}
|
|
|
|
|
2008-05-12 19:21:00 +00:00
|
|
|
/**
|
|
|
|
* update_max_tr - snapshot all trace buffers from global_trace to max_tr
|
|
|
|
* @tr: tracer
|
|
|
|
* @tsk: the task with the latency
|
|
|
|
* @cpu: The cpu that initiated the trace.
|
tracing: Add conditional snapshot
Currently, tracing snapshots are context-free - they capture the ring
buffer contents at the time the tracing_snapshot() function was
invoked, and nothing else. Additionally, they're always taken
unconditionally - the calling code can decide whether or not to take a
snapshot, but the data used to make that decision is kept separately
from the snapshot itself.
This change adds the ability to associate with each trace instance
some user data, along with an 'update' function that can use that data
to determine whether or not to actually take a snapshot. The update
function can then update that data along with any other state (as part
of the data presumably), if warranted.
Because snapshots are 'global' per-instance, only one user can enable
and use a conditional snapshot for any given trace instance. To
enable a conditional snapshot (see details in the function and data
structure comments), the user calls tracing_snapshot_cond_enable().
Similarly, to disable a conditional snapshot and free it up for other
users, tracing_snapshot_cond_disable() should be called.
To actually initiate a conditional snapshot, tracing_snapshot_cond()
should be called. tracing_snapshot_cond() will invoke the update()
callback, allowing the user to decide whether or not to actually take
the snapshot and update the user-defined data associated with the
snapshot. If the callback returns 'true', tracing_snapshot_cond()
will then actually take the snapshot and return.
This scheme allows for flexibility in snapshot implementations - for
example, by implementing slightly different update() callbacks,
snapshots can be taken in situations where the user is only interested
in taking a snapshot when a new maximum is hit versus when a value
changes in any way at all. Future patches will demonstrate both
cases.
Link: http://lkml.kernel.org/r/1bea07828d5fd6864a585f83b1eed47ce097eb45.1550100284.git.tom.zanussi@linux.intel.com
Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
2019-02-13 23:42:45 +00:00
|
|
|
* @cond_data: User data associated with a conditional snapshot
|
2008-05-12 19:21:00 +00:00
|
|
|
*
|
|
|
|
* Flip the buffers between the @tr and the max_tr and record information
|
|
|
|
* about which task was the cause of this latency.
|
|
|
|
*/
|
2008-05-12 19:20:51 +00:00
|
|
|
void
|
tracing: Add conditional snapshot
2019-02-13 23:42:45 +00:00
|
|
|
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
|
|
|
|
void *cond_data)
|
2008-05-12 19:20:42 +00:00
|
|
|
{
|
2012-05-11 17:29:49 +00:00
|
|
|
if (tr->stop_count)
|
2009-09-01 02:32:27 +00:00
|
|
|
return;
|
|
|
|
|
2008-05-12 19:20:43 +00:00
|
|
|
WARN_ON_ONCE(!irqs_disabled());
|
2013-01-22 18:35:11 +00:00
|
|
|
|
2013-03-05 23:25:02 +00:00
|
|
|
if (!tr->allocated_snapshot) {
|
2012-12-26 02:53:00 +00:00
|
|
|
/* Only the nop tracer should hit this when disabling */
|
2012-05-11 17:29:49 +00:00
|
|
|
WARN_ON_ONCE(tr->current_trace != &nop_trace);
|
2013-01-22 18:35:11 +00:00
|
|
|
return;
|
2012-12-26 02:53:00 +00:00
|
|
|
}
|
2013-01-22 18:35:11 +00:00
|
|
|
|
2014-01-14 15:04:59 +00:00
|
|
|
arch_spin_lock(&tr->max_lock);
|
2008-09-30 03:02:41 +00:00
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
/* Inherit the recordable setting from array_buffer */
|
|
|
|
if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
|
2018-07-13 16:28:15 +00:00
|
|
|
ring_buffer_record_on(tr->max_buffer.buffer);
|
|
|
|
else
|
|
|
|
ring_buffer_record_off(tr->max_buffer.buffer);
|
|
|
|
|
tracing: Add conditional snapshot
2019-02-13 23:42:45 +00:00
|
|
|
#ifdef CONFIG_TRACER_SNAPSHOT
|
|
|
|
if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
|
|
|
|
goto out_unlock;
|
|
|
|
#endif
|
2020-01-09 23:53:48 +00:00
|
|
|
swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
|
2008-09-30 03:02:41 +00:00
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
__update_max_tr(tr, tsk, cpu);
|
tracing: Add conditional snapshot
2019-02-13 23:42:45 +00:00
|
|
|
|
|
|
|
out_unlock:
|
2014-01-14 15:04:59 +00:00
|
|
|
arch_spin_unlock(&tr->max_lock);
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* update_max_tr_single - only copy one trace over, and reset the rest
|
2019-08-28 05:25:47 +00:00
|
|
|
* @tr: tracer
|
|
|
|
* @tsk: task with the latency
|
|
|
|
* @cpu: the cpu of the buffer to copy.
|
2008-05-12 19:21:00 +00:00
|
|
|
*
|
|
|
|
* Flip the trace of a single CPU buffer between the @tr and the max_tr.
|
2008-05-12 19:20:42 +00:00
|
|
|
*/
|
2008-05-12 19:20:51 +00:00
|
|
|
void
|
2008-05-12 19:20:42 +00:00
|
|
|
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
|
|
|
|
{
|
2008-09-30 03:02:41 +00:00
|
|
|
int ret;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2012-05-11 17:29:49 +00:00
|
|
|
if (tr->stop_count)
|
2009-09-01 02:32:27 +00:00
|
|
|
return;
|
|
|
|
|
2008-05-12 19:20:43 +00:00
|
|
|
WARN_ON_ONCE(!irqs_disabled());
|
2013-04-30 00:08:14 +00:00
|
|
|
if (!tr->allocated_snapshot) {
|
2013-03-26 21:33:00 +00:00
|
|
|
/* Only the nop tracer should hit this when disabling */
|
Tracing updates for Linux 3.10
Along with the usual minor fixes and clean ups there are a few major
changes with this pull request.
1) Multiple buffers for the ftrace facility
This feature has been requested by many people over the last few years.
I even heard that Google was about to implement it themselves. I finally
had time and cleaned up the code such that you can now create multiple
instances of the ftrace buffer and have different events go to different
buffers. This way, a low frequency event will not be lost in the noise
of a high frequency event.
Note, currently only events can go to different buffers, the tracers
(ie. function, function_graph and the latency tracers) still can only
be written to the main buffer.
2) The function tracer triggers have now been extended.
The function tracer had two triggers. One to enable tracing when a
function is hit, and one to disable tracing. Now you can record a
stack trace on a single (or many) function(s), take a snapshot of the
buffer (copy it to the snapshot buffer), and you can enable or disable
an event to be traced when a function is hit.
3) A perf clock has been added.
A "perf" clock can be chosen to be used when tracing. This will cause
ftrace to use the same clock as perf uses, and hopefully this will make
it easier to interleave the perf and ftrace data for analysis.
Merge tag 'trace-3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
"Along with the usual minor fixes and clean ups there are a few major
changes with this pull request.
1) Multiple buffers for the ftrace facility
This feature has been requested by many people over the last few
years. I even heard that Google was about to implement it themselves.
I finally had time and cleaned up the code such that you can now
create multiple instances of the ftrace buffer and have different
events go to different buffers. This way, a low frequency event will
not be lost in the noise of a high frequency event.
Note, currently only events can go to different buffers, the tracers
(ie function, function_graph and the latency tracers) still can only
be written to the main buffer.
2) The function tracer triggers have now been extended.
The function tracer had two triggers. One to enable tracing when a
function is hit, and one to disable tracing. Now you can record a
stack trace on a single (or many) function(s), take a snapshot of the
buffer (copy it to the snapshot buffer), and you can enable or disable
an event to be traced when a function is hit.
3) A perf clock has been added.
A "perf" clock can be chosen to be used when tracing. This will cause
ftrace to use the same clock as perf uses, and hopefully this will
make it easier to interleave the perf and ftrace data for analysis."
* tag 'trace-3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (82 commits)
tracepoints: Prevent null probe from being added
tracing: Compare to 1 instead of zero for is_signed_type()
tracing: Remove obsolete macro guard _TRACE_PROFILE_INIT
ftrace: Get rid of ftrace_profile_bits
tracing: Check return value of tracing_init_dentry()
tracing: Get rid of unneeded key calculation in ftrace_hash_move()
tracing: Reset ftrace_graph_filter_enabled if count is zero
tracing: Fix off-by-one on allocating stat->pages
kernel: tracing: Use strlcpy instead of strncpy
tracing: Update debugfs README file
tracing: Fix ftrace_dump()
tracing: Rename trace_event_mutex to trace_event_sem
tracing: Fix comment about prefix in arch_syscall_match_sym_name()
tracing: Convert trace_destroy_fields() to static
tracing: Move find_event_field() into trace_events.c
tracing: Use TRACE_MAX_PRINT instead of constant
tracing: Use pr_warn_once instead of open coded implementation
ring-buffer: Add ring buffer startup selftest
tracing: Bring Documentation/trace/ftrace.txt up to date
tracing: Add "perf" trace_clock
...
Conflicts:
kernel/trace/ftrace.c
kernel/trace/trace.c
2013-04-29 20:55:38 +00:00
|
|
|
WARN_ON_ONCE(tr->current_trace != &nop_trace);
|
2010-07-01 05:34:35 +00:00
|
|
|
return;
|
2013-03-26 21:33:00 +00:00
|
|
|
}
|
2010-07-01 05:34:35 +00:00
|
|
|
|
2014-01-14 15:04:59 +00:00
|
|
|
arch_spin_lock(&tr->max_lock);
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
|
2008-09-30 03:02:41 +00:00
|
|
|
|
2009-09-03 23:13:05 +00:00
|
|
|
if (ret == -EBUSY) {
|
|
|
|
/*
|
|
|
|
* We failed to swap the buffer due to a commit taking
|
|
|
|
* place on this CPU. We fail to record, but we reset
|
|
|
|
* the max trace buffer (no one writes directly to it)
|
|
|
|
* and flag that it failed.
|
|
|
|
*/
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
|
2009-09-03 23:13:05 +00:00
|
|
|
"Failed to swap buffers due to commit in progress\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
|
2008-05-12 19:20:42 +00:00
|
|
|
|
|
|
|
__update_max_tr(tr, tsk, cpu);
|
2014-01-14 15:04:59 +00:00
|
|
|
arch_spin_unlock(&tr->max_lock);
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
2009-08-27 20:52:21 +00:00
|
|
|
#endif /* CONFIG_TRACER_MAX_TRACE */
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2018-11-30 01:32:26 +00:00
|
|
|
static int wait_on_pipe(struct trace_iterator *iter, int full)
|
2012-11-02 00:54:21 +00:00
|
|
|
{
|
2013-03-01 00:59:17 +00:00
|
|
|
/* Iterators are static, they should be filled or empty */
|
|
|
|
if (trace_buffer_iter(iter, iter->cpu_file))
|
2014-06-10 13:46:00 +00:00
|
|
|
return 0;
|
2012-11-02 00:54:21 +00:00
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
|
2014-11-10 18:46:34 +00:00
|
|
|
full);
|
2012-11-02 00:54:21 +00:00
|
|
|
}
|
|
|
|
|
2013-03-07 16:10:56 +00:00
|
|
|
#ifdef CONFIG_FTRACE_STARTUP_TEST
|
2017-03-24 21:59:10 +00:00
|
|
|
static bool selftests_can_run;
|
|
|
|
|
|
|
|
struct trace_selftests {
|
|
|
|
struct list_head list;
|
|
|
|
struct tracer *type;
|
|
|
|
};
|
|
|
|
|
|
|
|
static LIST_HEAD(postponed_selftests);
|
|
|
|
|
|
|
|
static int save_selftest(struct tracer *type)
|
|
|
|
{
|
|
|
|
struct trace_selftests *selftest;
|
|
|
|
|
|
|
|
selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
|
|
|
|
if (!selftest)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
selftest->type = type;
|
|
|
|
list_add(&selftest->list, &postponed_selftests);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-03-07 16:10:56 +00:00
|
|
|
static int run_tracer_selftest(struct tracer *type)
|
|
|
|
{
|
|
|
|
struct trace_array *tr = &global_trace;
|
|
|
|
struct tracer *saved_tracer = tr->current_trace;
|
|
|
|
int ret;
|
2012-11-02 00:54:21 +00:00
|
|
|
|
2013-03-07 16:10:56 +00:00
|
|
|
if (!type->selftest || tracing_selftest_disabled)
|
|
|
|
return 0;
|
2012-11-02 00:54:21 +00:00
|
|
|
|
2017-03-24 21:59:10 +00:00
|
|
|
/*
|
|
|
|
* If a tracer registers early in boot up (before scheduling is
|
|
|
|
* initialized and such), then do not run its selftests yet.
|
|
|
|
* Instead, run it a little later in the boot process.
|
|
|
|
*/
|
|
|
|
if (!selftests_can_run)
|
|
|
|
return save_selftest(type);
|
|
|
|
|
2012-11-02 00:54:21 +00:00
|
|
|
/*
|
2013-03-07 16:10:56 +00:00
|
|
|
* Run a selftest on this tracer.
|
|
|
|
* Here we reset the trace buffer, and set the current
|
|
|
|
* tracer to be this tracer. The tracer can then run some
|
|
|
|
* internal tracing to verify that everything is in order.
|
|
|
|
* If we fail, we do not register this tracer.
|
2012-11-02 00:54:21 +00:00
|
|
|
*/
|
2020-01-09 23:53:48 +00:00
|
|
|
tracing_reset_online_cpus(&tr->array_buffer);
|
2012-11-02 00:54:21 +00:00
|
|
|
|
2013-03-07 16:10:56 +00:00
|
|
|
tr->current_trace = type;
|
|
|
|
|
|
|
|
#ifdef CONFIG_TRACER_MAX_TRACE
|
|
|
|
if (type->use_max_tr) {
|
|
|
|
/* If we expanded the buffers, make sure the max is expanded too */
|
|
|
|
if (ring_buffer_expanded)
|
|
|
|
ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
|
|
|
|
RING_BUFFER_ALL_CPUS);
|
|
|
|
tr->allocated_snapshot = true;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* the test is responsible for initializing and enabling */
|
|
|
|
pr_info("Testing tracer %s: ", type->name);
|
|
|
|
ret = type->selftest(type, tr);
|
|
|
|
/* the test is responsible for resetting too */
|
|
|
|
tr->current_trace = saved_tracer;
|
|
|
|
if (ret) {
|
|
|
|
printk(KERN_CONT "FAILED!\n");
|
|
|
|
/* Add the warning after printing 'FAILED' */
|
|
|
|
WARN_ON(1);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
/* Only reset on passing, to avoid touching corrupted buffers */
|
2020-01-09 23:53:48 +00:00
|
|
|
tracing_reset_online_cpus(&tr->array_buffer);
|
2013-03-07 16:10:56 +00:00
|
|
|
|
|
|
|
#ifdef CONFIG_TRACER_MAX_TRACE
|
|
|
|
if (type->use_max_tr) {
|
|
|
|
tr->allocated_snapshot = false;
|
2012-11-02 00:54:21 +00:00
|
|
|
|
2013-03-07 16:10:56 +00:00
|
|
|
/* Shrink the max buffer again */
|
|
|
|
if (ring_buffer_expanded)
|
|
|
|
ring_buffer_resize(tr->max_buffer.buffer, 1,
|
|
|
|
RING_BUFFER_ALL_CPUS);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
printk(KERN_CONT "PASSED\n");
|
|
|
|
return 0;
|
|
|
|
}
|
2017-03-24 21:59:10 +00:00
|
|
|
|
|
|
|
static __init int init_trace_selftests(void)
|
|
|
|
{
|
|
|
|
struct trace_selftests *p, *n;
|
|
|
|
struct tracer *t, **last;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
selftests_can_run = true;
|
|
|
|
|
|
|
|
mutex_lock(&trace_types_lock);
|
|
|
|
|
|
|
|
if (list_empty(&postponed_selftests))
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
pr_info("Running postponed tracer tests:\n");
|
|
|
|
|
2020-02-20 20:38:01 +00:00
|
|
|
tracing_selftest_running = true;
|
2017-03-24 21:59:10 +00:00
|
|
|
list_for_each_entry_safe(p, n, &postponed_selftests, list) {
|
2018-11-30 14:56:22 +00:00
|
|
|
/* This loop can take minutes when sanitizers are enabled, so
|
|
|
|
* let's make sure we allow RCU processing.
|
|
|
|
*/
|
|
|
|
cond_resched();
|
2017-03-24 21:59:10 +00:00
|
|
|
ret = run_tracer_selftest(p->type);
|
|
|
|
/* If the test fails, then warn and remove from available_tracers */
|
|
|
|
if (ret < 0) {
|
|
|
|
WARN(1, "tracer: %s failed selftest, disabling\n",
|
|
|
|
p->type->name);
|
|
|
|
last = &trace_types;
|
|
|
|
for (t = trace_types; t; t = t->next) {
|
|
|
|
if (t == p->type) {
|
|
|
|
*last = t->next;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
last = &t->next;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
list_del(&p->list);
|
|
|
|
kfree(p);
|
|
|
|
}
|
2020-02-20 20:38:01 +00:00
|
|
|
tracing_selftest_running = false;
|
2017-03-24 21:59:10 +00:00
|
|
|
|
|
|
|
out:
|
|
|
|
mutex_unlock(&trace_types_lock);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2017-05-17 15:14:35 +00:00
|
|
|
core_initcall(init_trace_selftests);
|
2013-03-07 16:10:56 +00:00
|
|
|
#else
|
|
|
|
static inline int run_tracer_selftest(struct tracer *type)
|
|
|
|
{
|
|
|
|
return 0;
|
2012-11-02 00:54:21 +00:00
|
|
|
}
|
2013-03-07 16:10:56 +00:00
|
|
|
#endif /* CONFIG_FTRACE_STARTUP_TEST */
|
2012-11-02 00:54:21 +00:00
|
|
|
|
2015-09-29 21:31:55 +00:00
|
|
|
static void add_tracer_options(struct trace_array *tr, struct tracer *t);
|
|
|
|
|
2015-11-04 01:14:29 +00:00
|
|
|
static void __init apply_trace_boot_options(void);
|
|
|
|
|
2008-05-12 19:21:00 +00:00
|
|
|
/**
|
|
|
|
* register_tracer - register a tracer with the ftrace system.
|
2019-08-28 05:25:47 +00:00
|
|
|
* @type: the plugin for the tracer
|
2008-05-12 19:21:00 +00:00
|
|
|
*
|
|
|
|
* Register a new plugin tracer.
|
|
|
|
*/
|
2015-11-04 01:14:29 +00:00
|
|
|
int __init register_tracer(struct tracer *type)
|
2008-05-12 19:20:42 +00:00
|
|
|
{
|
|
|
|
struct tracer *t;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (!type->name) {
|
|
|
|
pr_info("Tracer must have a name\n");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2010-07-10 10:06:44 +00:00
|
|
|
if (strlen(type->name) >= MAX_TRACER_SIZE) {
|
2009-09-18 06:06:47 +00:00
|
|
|
pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2019-12-02 21:25:27 +00:00
|
|
|
if (security_locked_down(LOCKDOWN_TRACEFS)) {
|
2019-12-05 22:25:03 +00:00
|
|
|
pr_warn("Can not register tracer %s due to lockdown\n",
|
2019-12-02 21:25:27 +00:00
|
|
|
type->name);
|
|
|
|
return -EPERM;
|
|
|
|
}
|
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
mutex_lock(&trace_types_lock);
|
2008-11-19 09:00:15 +00:00
|
|
|
|
2008-12-06 02:41:33 +00:00
|
|
|
tracing_selftest_running = true;
|
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
for (t = trace_types; t; t = t->next) {
|
|
|
|
if (strcmp(type->name, t->name) == 0) {
|
|
|
|
/* already found */
|
2009-09-18 06:06:47 +00:00
|
|
|
pr_info("Tracer %s already registered\n",
|
2008-05-12 19:20:42 +00:00
|
|
|
type->name);
|
|
|
|
ret = -1;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-11-17 18:23:42 +00:00
|
|
|
if (!type->set_flag)
|
|
|
|
type->set_flag = &dummy_set_flag;
|
2016-03-08 13:37:01 +00:00
|
|
|
if (!type->flags) {
|
|
|
|
/* allocate a dummy tracer_flags */
|
|
|
|
type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
|
2016-03-14 12:35:41 +00:00
|
|
|
if (!type->flags) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto out;
|
|
|
|
}
|
2016-03-08 13:37:01 +00:00
|
|
|
type->flags->val = 0;
|
|
|
|
type->flags->opts = dummy_tracer_opt;
|
|
|
|
} else
|
2008-11-17 18:23:42 +00:00
|
|
|
if (!type->flags->opts)
|
|
|
|
type->flags->opts = dummy_tracer_opt;
|
2009-02-11 01:25:00 +00:00
|
|
|
|
2016-03-08 13:37:01 +00:00
|
|
|
/* store the tracer for __set_tracer_option */
|
|
|
|
type->flags->trace = type;
|
|
|
|
|
2013-03-07 16:10:56 +00:00
|
|
|
ret = run_tracer_selftest(type);
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
2008-05-12 19:20:44 +00:00
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
type->next = trace_types;
|
|
|
|
trace_types = type;
|
2015-09-29 21:31:55 +00:00
|
|
|
add_tracer_options(&global_trace, type);
|
2008-05-12 19:20:44 +00:00
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
out:
|
2008-12-06 02:41:33 +00:00
|
|
|
tracing_selftest_running = false;
|
2008-05-12 19:20:42 +00:00
|
|
|
mutex_unlock(&trace_types_lock);
|
|
|
|
|
2009-02-05 06:13:38 +00:00
|
|
|
if (ret || !default_bootup_tracer)
|
|
|
|
goto out_unlock;
|
|
|
|
|
2009-09-18 06:06:47 +00:00
|
|
|
if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
|
2009-02-05 06:13:38 +00:00
|
|
|
goto out_unlock;
|
|
|
|
|
|
|
|
printk(KERN_INFO "Starting tracer '%s'\n", type->name);
|
|
|
|
/* Do we want this tracer to start on bootup? */
|
2013-11-07 03:42:48 +00:00
|
|
|
tracing_set_tracer(&global_trace, type->name);
|
2009-02-05 06:13:38 +00:00
|
|
|
default_bootup_tracer = NULL;
|
2015-11-04 01:14:29 +00:00
|
|
|
|
|
|
|
apply_trace_boot_options();
|
|
|
|
|
2009-02-05 06:13:38 +00:00
|
|
|
/* disable other selftests, since this will break it. */
|
2013-03-08 03:48:09 +00:00
|
|
|
tracing_selftest_disabled = true;
|
2009-02-03 02:38:32 +00:00
|
|
|
#ifdef CONFIG_FTRACE_STARTUP_TEST
|
2009-02-05 06:13:38 +00:00
|
|
|
printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
|
|
|
|
type->name);
|
2009-02-03 02:38:32 +00:00
|
|
|
#endif
|
|
|
|
|
2009-02-05 06:13:38 +00:00
|
|
|
out_unlock:
|
2008-05-12 19:20:42 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
|
2009-09-04 16:35:16 +00:00
|
|
|
{
|
2019-12-13 18:58:57 +00:00
|
|
|
struct trace_buffer *buffer = buf->buffer;
|
2009-09-04 16:35:16 +00:00
|
|
|
|
2012-12-19 07:02:34 +00:00
|
|
|
if (!buffer)
|
|
|
|
return;
|
|
|
|
|
2009-09-04 16:35:16 +00:00
|
|
|
ring_buffer_record_disable(buffer);
|
|
|
|
|
|
|
|
/* Make sure all commits have finished */
|
2018-11-07 02:44:52 +00:00
|
|
|
synchronize_rcu();
|
2012-05-09 00:57:53 +00:00
|
|
|
ring_buffer_reset_cpu(buffer, cpu);
|
2009-09-04 16:35:16 +00:00
|
|
|
|
|
|
|
ring_buffer_record_enable(buffer);
|
|
|
|
}
|
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
void tracing_reset_online_cpus(struct array_buffer *buf)
|
2008-12-19 10:08:39 +00:00
|
|
|
{
|
2019-12-13 18:58:57 +00:00
|
|
|
struct trace_buffer *buffer = buf->buffer;
|
2008-12-19 10:08:39 +00:00
|
|
|
|
2012-12-19 07:02:34 +00:00
|
|
|
if (!buffer)
|
|
|
|
return;
|
|
|
|
|
2009-09-04 16:02:35 +00:00
|
|
|
ring_buffer_record_disable(buffer);
|
|
|
|
|
|
|
|
/* Make sure all commits have finished */
|
2018-11-07 02:44:52 +00:00
|
|
|
synchronize_rcu();
|
2009-09-04 16:02:35 +00:00
|
|
|
|
2013-08-03 01:36:16 +00:00
|
|
|
buf->time_start = buffer_ftrace_now(buf, buf->cpu);
|
2008-12-19 10:08:39 +00:00
|
|
|
|
2020-06-25 05:34:03 +00:00
|
|
|
ring_buffer_reset_online_cpus(buffer);
|
2009-09-04 16:02:35 +00:00
|
|
|
|
|
|
|
ring_buffer_record_enable(buffer);
|
2008-12-19 10:08:39 +00:00
|
|
|
}
|
|
|
|
|
2013-07-24 02:21:59 +00:00
|
|
|
/* Must have trace_types_lock held */
|
2013-03-05 04:26:06 +00:00
|
|
|
void tracing_reset_all_online_cpus(void)
|
2009-05-07 01:54:09 +00:00
|
|
|
{
|
2013-03-05 04:26:06 +00:00
|
|
|
struct trace_array *tr;
|
|
|
|
|
|
|
|
list_for_each_entry(tr, &ftrace_trace_arrays, list) {
|
tracing: Only have rmmod clear buffers that its events were active in
Currently, when a module event is enabled, when that module is removed, it
clears all ring buffers. This is to prevent another module from being loaded
and having one of its trace event IDs from reusing a trace event ID of the
removed module. This could cause undesirable effects as the trace event of
the new module would be using its own processing algorithms to process raw
data of another event. To prevent this, when a module is loaded, if any of
its events have been used (signified by the WAS_ENABLED event call flag,
which is never cleared), all ring buffers are cleared, just in case any one
of them contains event data of the removed event.
The problem is, there's no reason to clear all ring buffers if only one (or
less than all of them) uses one of the events. Instead, only clear the ring
buffers that recorded the events of a module that is being removed.
To do this, instead of keeping the WAS_ENABLED flag with the trace event
call, move it to the per instance (per ring buffer) event file descriptor.
The event file descriptor maps each event to a separate ring buffer
instance. Then when the module is removed, only the ring buffers that
activated one of the module's events get cleared. The rest are not touched.
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
2017-08-31 21:03:47 +00:00
|
|
|
if (!tr->clear_trace)
|
|
|
|
continue;
|
|
|
|
tr->clear_trace = false;
|
2020-01-09 23:53:48 +00:00
|
|
|
tracing_reset_online_cpus(&tr->array_buffer);
|
tracing: Consolidate max_tr into main trace_array structure
Currently, the way the latency tracers and snapshot feature works
is to have a separate trace_array called "max_tr" that holds the
snapshot buffer. For latency tracers, this snapshot buffer is used
to swap the running buffer with this buffer to save the current max
latency.
The only items needed for the max_tr is really just a copy of the buffer
itself, the per_cpu data pointers, the time_start timestamp that states
when the max latency was triggered, and the cpu that the max latency
was triggered on. All other fields in trace_array are unused by the
max_tr, making the max_tr mostly bloat.
This change removes the max_tr completely, and adds a new structure
called trace_buffer, that holds the buffer pointer, the per_cpu data
pointers, the time_start timestamp, and the cpu where the latency occurred.
The trace_array, now has two trace_buffers, one for the normal trace and
one for the max trace or snapshot. By doing this, not only do we remove
the bloat from the max_trace but the instances of traces can now use
their own snapshot feature and not have just the top level global_trace have
the snapshot feature and latency tracers for itself.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2013-03-05 14:24:35 +00:00
|
|
|
#ifdef CONFIG_TRACER_MAX_TRACE
|
|
|
|
tracing_reset_online_cpus(&tr->max_buffer);
|
|
|
|
#endif
|
2013-03-05 04:26:06 +00:00
|
|
|
}
|
2009-05-07 01:54:09 +00:00
|
|
|
}
|
|
|
|
|
2017-06-27 02:01:55 +00:00
|
|
|
static int *tgid_map;
|
|
|
|
|
2014-06-05 01:24:27 +00:00
|
|
|
#define SAVED_CMDLINES_DEFAULT 128
|
2009-03-18 08:03:19 +00:00
|
|
|
#define NO_CMDLINE_MAP UINT_MAX
|
2009-12-03 11:38:57 +00:00
|
|
|
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
|
2014-06-05 01:24:27 +00:00
|
|
|
struct saved_cmdlines_buffer {
|
|
|
|
unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
|
|
|
|
unsigned *map_cmdline_to_pid;
|
|
|
|
unsigned cmdline_num;
|
|
|
|
int cmdline_idx;
|
|
|
|
char *saved_cmdlines;
|
|
|
|
};
|
|
|
|
static struct saved_cmdlines_buffer *savedcmd;
|
2008-05-12 19:21:00 +00:00
|
|
|
|
|
|
|
/* temporarily disable recording */
|
2017-06-27 02:01:55 +00:00
|
|
|
static atomic_t trace_record_taskinfo_disabled __read_mostly;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2014-06-05 01:24:27 +00:00
|
|
|
static inline char *get_saved_cmdlines(int idx)
|
|
|
|
{
|
|
|
|
return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void set_cmdline(int idx, const char *cmdline)
|
2008-05-12 19:20:42 +00:00
|
|
|
{
|
2019-03-05 16:12:00 +00:00
|
|
|
strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
|
2014-06-05 01:24:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int allocate_cmdlines_buffer(unsigned int val,
|
|
|
|
struct saved_cmdlines_buffer *s)
|
|
|
|
{
|
treewide: kmalloc() -> kmalloc_array()
The kmalloc() function has a 2-factor argument form, kmalloc_array(). This
patch replaces cases of:
kmalloc(a * b, gfp)
with:
kmalloc_array(a * b, gfp)
as well as handling cases of:
kmalloc(a * b * c, gfp)
with:
kmalloc(array3_size(a, b, c), gfp)
as it's slightly less ugly than:
kmalloc_array(array_size(a, b), c, gfp)
This does, however, attempt to ignore constant size factors like:
kmalloc(4 * 1024, gfp)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
The tools/ directory was manually excluded, since it has its own
implementation of kmalloc().
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@
(
kmalloc(
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
kmalloc(
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@
(
kmalloc(
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(char) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (COUNT_ID)
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * COUNT_ID
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (COUNT_CONST)
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * COUNT_CONST
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (COUNT_ID)
+ COUNT_ID, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * COUNT_ID
+ COUNT_ID, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (COUNT_CONST)
+ COUNT_CONST, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * COUNT_CONST
+ COUNT_CONST, sizeof(THING)
, ...)
)
// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@
- kmalloc
+ kmalloc_array
(
- SIZE * COUNT
+ COUNT, SIZE
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
kmalloc(
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
kmalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kmalloc(
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@
(
kmalloc(
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
kmalloc(C1 * C2 * C3, ...)
|
kmalloc(
- (E1) * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- (E1) * (E2) * E3
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- (E1) * (E2) * (E3)
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@
(
kmalloc(sizeof(THING) * C2, ...)
|
kmalloc(sizeof(TYPE) * C2, ...)
|
kmalloc(C1 * C2 * C3, ...)
|
kmalloc(C1 * C2, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (E2)
+ E2, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * E2
+ E2, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (E2)
+ E2, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * E2
+ E2, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- (E1) * E2
+ E1, E2
, ...)
|
- kmalloc
+ kmalloc_array
(
- (E1) * (E2)
+ E1, E2
, ...)
|
- kmalloc
+ kmalloc_array
(
- E1 * E2
+ E1, E2
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
2018-06-12 20:55:00 +00:00
|
|
|
s->map_cmdline_to_pid = kmalloc_array(val,
|
|
|
|
sizeof(*s->map_cmdline_to_pid),
|
|
|
|
GFP_KERNEL);
|
2014-06-05 01:24:27 +00:00
|
|
|
if (!s->map_cmdline_to_pid)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
treewide: kmalloc() -> kmalloc_array()
2018-06-12 20:55:00 +00:00
|
|
|
s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
|
2014-06-05 01:24:27 +00:00
|
|
|
if (!s->saved_cmdlines) {
|
|
|
|
kfree(s->map_cmdline_to_pid);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
s->cmdline_idx = 0;
|
|
|
|
s->cmdline_num = val;
|
|
|
|
memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
|
|
|
|
sizeof(s->map_pid_to_cmdline));
|
|
|
|
memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
|
|
|
|
val * sizeof(*s->map_cmdline_to_pid));
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
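/*
 * Rough footprint of the default allocation above, assuming the usual
 * TASK_COMM_LEN of 16 and PID_MAX_DEFAULT of 0x8000 (both depend on the
 * kernel configuration, so treat the numbers as an illustration only):
 *
 *   saved_cmdlines     : SAVED_CMDLINES_DEFAULT * TASK_COMM_LEN
 *                        = 128 * 16                  =   2 KiB
 *   map_cmdline_to_pid : 128 * sizeof(unsigned)      = 512 bytes
 *   map_pid_to_cmdline : (PID_MAX_DEFAULT + 1) * sizeof(unsigned)
 *                        = 32769 * 4                 = ~128 KiB
 *                        (embedded in struct saved_cmdlines_buffer itself)
 */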
|
|
|
|
|
|
|
|
static int trace_create_savedcmd(void)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2014-06-10 07:11:35 +00:00
|
|
|
savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
|
2014-06-05 01:24:27 +00:00
|
|
|
if (!savedcmd)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
|
|
|
|
if (ret < 0) {
|
|
|
|
kfree(savedcmd);
|
|
|
|
savedcmd = NULL;
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
|
|
|
|
2009-09-12 23:43:07 +00:00
|
|
|
int is_tracing_stopped(void)
|
|
|
|
{
|
2012-05-11 17:29:49 +00:00
|
|
|
return global_trace.stop_count;
|
2009-09-12 23:43:07 +00:00
|
|
|
}
|
|
|
|
|
2008-11-05 21:05:44 +00:00
|
|
|
/**
|
|
|
|
* tracing_start - quick start of the tracer
|
|
|
|
*
|
|
|
|
* If tracing is enabled but was stopped by tracing_stop,
|
|
|
|
* this will start the tracer back up.
|
|
|
|
*/
|
|
|
|
void tracing_start(void)
|
|
|
|
{
|
2019-12-13 18:58:57 +00:00
|
|
|
struct trace_buffer *buffer;
|
2008-11-05 21:05:44 +00:00
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
if (tracing_disabled)
|
|
|
|
return;
|
|
|
|
|
2012-05-11 17:29:49 +00:00
|
|
|
raw_spin_lock_irqsave(&global_trace.start_lock, flags);
|
|
|
|
if (--global_trace.stop_count) {
|
|
|
|
if (global_trace.stop_count < 0) {
|
2009-01-22 19:26:15 +00:00
|
|
|
/* Someone screwed up their debugging */
|
|
|
|
WARN_ON_ONCE(1);
|
2012-05-11 17:29:49 +00:00
|
|
|
global_trace.stop_count = 0;
|
2009-01-22 19:26:15 +00:00
|
|
|
}
|
2008-11-05 21:05:44 +00:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2010-03-13 00:56:00 +00:00
|
|
|
/* Prevent the buffers from switching */
|
2014-01-14 15:04:59 +00:00
|
|
|
arch_spin_lock(&global_trace.max_lock);
|
2008-11-05 21:05:44 +00:00
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
buffer = global_trace.array_buffer.buffer;
|
2008-11-05 21:05:44 +00:00
|
|
|
if (buffer)
|
|
|
|
ring_buffer_record_enable(buffer);
|
|
|
|
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
#ifdef CONFIG_TRACER_MAX_TRACE
|
|
|
|
buffer = global_trace.max_buffer.buffer;
|
2008-11-05 21:05:44 +00:00
|
|
|
if (buffer)
|
|
|
|
ring_buffer_record_enable(buffer);
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
#endif
|
2008-11-05 21:05:44 +00:00
|
|
|
|
2014-01-14 15:04:59 +00:00
|
|
|
arch_spin_unlock(&global_trace.max_lock);
|
2010-03-13 00:56:00 +00:00
|
|
|
|
2008-11-05 21:05:44 +00:00
|
|
|
out:
|
2012-05-11 17:29:49 +00:00
|
|
|
raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void tracing_start_tr(struct trace_array *tr)
|
|
|
|
{
|
2019-12-13 18:58:57 +00:00
|
|
|
struct trace_buffer *buffer;
|
2012-05-11 17:29:49 +00:00
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
if (tracing_disabled)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* If global, we need to also start the max tracer */
|
|
|
|
if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
|
|
|
|
return tracing_start();
|
|
|
|
|
|
|
|
raw_spin_lock_irqsave(&tr->start_lock, flags);
|
|
|
|
|
|
|
|
if (--tr->stop_count) {
|
|
|
|
if (tr->stop_count < 0) {
|
|
|
|
/* Someone screwed up their debugging */
|
|
|
|
WARN_ON_ONCE(1);
|
|
|
|
tr->stop_count = 0;
|
|
|
|
}
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
buffer = tr->array_buffer.buffer;
|
2012-05-11 17:29:49 +00:00
|
|
|
if (buffer)
|
|
|
|
ring_buffer_record_enable(buffer);
|
|
|
|
|
|
|
|
out:
|
|
|
|
raw_spin_unlock_irqrestore(&tr->start_lock, flags);
|
2008-11-05 21:05:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* tracing_stop - quick stop of the tracer
|
|
|
|
*
|
|
|
|
* Light weight way to stop tracing. Use in conjunction with
|
|
|
|
* tracing_start.
|
|
|
|
*/
|
|
|
|
void tracing_stop(void)
|
|
|
|
{
|
2019-12-13 18:58:57 +00:00
|
|
|
struct trace_buffer *buffer;
|
2008-11-05 21:05:44 +00:00
|
|
|
unsigned long flags;
|
|
|
|
|
2012-05-11 17:29:49 +00:00
|
|
|
raw_spin_lock_irqsave(&global_trace.start_lock, flags);
|
|
|
|
if (global_trace.stop_count++)
|
2008-11-05 21:05:44 +00:00
|
|
|
goto out;
|
|
|
|
|
2010-03-13 00:56:00 +00:00
|
|
|
/* Prevent the buffers from switching */
|
2014-01-14 15:04:59 +00:00
|
|
|
arch_spin_lock(&global_trace.max_lock);
|
2010-03-13 00:56:00 +00:00
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
buffer = global_trace.array_buffer.buffer;
|
2008-11-05 21:05:44 +00:00
|
|
|
if (buffer)
|
|
|
|
ring_buffer_record_disable(buffer);
|
|
|
|
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
#ifdef CONFIG_TRACER_MAX_TRACE
|
|
|
|
buffer = global_trace.max_buffer.buffer;
|
2008-11-05 21:05:44 +00:00
|
|
|
if (buffer)
|
|
|
|
ring_buffer_record_disable(buffer);
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
#endif
|
2008-11-05 21:05:44 +00:00
|
|
|
|
2014-01-14 15:04:59 +00:00
|
|
|
arch_spin_unlock(&global_trace.max_lock);
|
2010-03-13 00:56:00 +00:00
|
|
|
|
2008-11-05 21:05:44 +00:00
|
|
|
out:
|
2012-05-11 17:29:49 +00:00
|
|
|
raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
|
|
|
|
}
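/*
 * Sketch of the pairing described by the kernel-doc above: tracing_stop()
 * and tracing_start() nest via stop_count, so recording can be suspended
 * around a region whose events are unwanted. The region shown here is
 * hypothetical.
 */
static void example_skip_region(void)
{
	tracing_stop();
	/* ... work whose events should not land in the ring buffer ... */
	tracing_start();
}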
|
|
|
|
|
|
|
|
static void tracing_stop_tr(struct trace_array *tr)
|
|
|
|
{
|
2019-12-13 18:58:57 +00:00
|
|
|
struct trace_buffer *buffer;
|
2012-05-11 17:29:49 +00:00
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
/* If global, we need to also stop the max tracer */
|
|
|
|
if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
|
|
|
|
return tracing_stop();
|
|
|
|
|
|
|
|
raw_spin_lock_irqsave(&tr->start_lock, flags);
|
|
|
|
if (tr->stop_count++)
|
|
|
|
goto out;
|
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
buffer = tr->array_buffer.buffer;
|
2012-05-11 17:29:49 +00:00
|
|
|
if (buffer)
|
|
|
|
ring_buffer_record_disable(buffer);
|
|
|
|
|
|
|
|
out:
|
|
|
|
raw_spin_unlock_irqrestore(&tr->start_lock, flags);
|
2008-11-05 21:05:44 +00:00
|
|
|
}
|
|
|
|
|
2014-05-30 13:42:39 +00:00
|
|
|
static int trace_save_cmdline(struct task_struct *tsk)
|
2008-05-12 19:20:42 +00:00
|
|
|
{
|
2009-03-18 08:00:41 +00:00
|
|
|
unsigned pid, idx;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2017-07-06 23:00:21 +00:00
|
|
|
/* treat recording of idle task as a success */
|
|
|
|
if (!tsk->pid)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (unlikely(tsk->pid > PID_MAX_DEFAULT))
|
2014-05-30 13:42:39 +00:00
|
|
|
return 0;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* It's not the end of the world if we don't get
|
|
|
|
* the lock, but we also don't want to spin
|
|
|
|
* nor do we want to disable interrupts,
|
|
|
|
* so if we miss here, then better luck next time.
|
|
|
|
*/
|
2009-12-02 19:01:25 +00:00
|
|
|
if (!arch_spin_trylock(&trace_cmdline_lock))
|
2014-05-30 13:42:39 +00:00
|
|
|
return 0;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2014-06-05 01:24:27 +00:00
|
|
|
idx = savedcmd->map_pid_to_cmdline[tsk->pid];
|
2009-03-18 08:03:19 +00:00
|
|
|
if (idx == NO_CMDLINE_MAP) {
|
2014-06-05 01:24:27 +00:00
|
|
|
idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2009-03-18 08:00:41 +00:00
|
|
|
/*
|
|
|
|
* Check whether the cmdline buffer at idx has a pid
|
|
|
|
* mapped. We are going to overwrite that entry so we
|
|
|
|
* need to clear the map_pid_to_cmdline. Otherwise we
|
|
|
|
* would read the new comm for the old pid.
|
|
|
|
*/
|
2014-06-05 01:24:27 +00:00
|
|
|
pid = savedcmd->map_cmdline_to_pid[idx];
|
2009-03-18 08:00:41 +00:00
|
|
|
if (pid != NO_CMDLINE_MAP)
|
2014-06-05 01:24:27 +00:00
|
|
|
savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2014-06-05 01:24:27 +00:00
|
|
|
savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
|
|
|
|
savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2014-06-05 01:24:27 +00:00
|
|
|
savedcmd->cmdline_idx = idx;
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
|
|
|
|
2014-06-05 01:24:27 +00:00
|
|
|
set_cmdline(idx, tsk->comm);
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2009-12-02 19:01:25 +00:00
|
|
|
arch_spin_unlock(&trace_cmdline_lock);
|
2014-05-30 13:42:39 +00:00
|
|
|
|
|
|
|
return 1;
|
2008-05-12 19:20:42 +00:00
|
|
|
}
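/*
 * Worked example of the mapping above, with hypothetical pids and the
 * default cmdline_num of 128. A pid recorded for the first time claims
 * the next slot; once every slot is in use, saving a new pid evicts
 * whichever pid owned the slot that comes around again:
 *
 *   save(1000): idx = (cmdline_idx + 1) % 128, say 5
 *               map_cmdline_to_pid[5]    = 1000
 *               map_pid_to_cmdline[1000] = 5
 *   ... enough new pids later, slot 5 is reused ...
 *   save(4321): map_pid_to_cmdline[1000] = NO_CMDLINE_MAP   (evicted)
 *               map_cmdline_to_pid[5]    = 4321
 *               map_pid_to_cmdline[4321] = 5
 */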
|
|
|
|
|
2014-05-30 14:49:46 +00:00
|
|
|
static void __trace_find_cmdline(int pid, char comm[])
|
2008-05-12 19:20:42 +00:00
|
|
|
{
|
|
|
|
unsigned map;
|
|
|
|
|
2009-03-16 23:20:15 +00:00
|
|
|
if (!pid) {
|
|
|
|
strcpy(comm, "<idle>");
|
|
|
|
return;
|
|
|
|
}
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2010-01-25 20:11:53 +00:00
|
|
|
if (WARN_ON_ONCE(pid < 0)) {
|
|
|
|
strcpy(comm, "<XXX>");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2009-03-16 23:20:15 +00:00
|
|
|
if (pid > PID_MAX_DEFAULT) {
|
|
|
|
strcpy(comm, "<...>");
|
|
|
|
return;
|
|
|
|
}
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2014-06-05 01:24:27 +00:00
|
|
|
map = savedcmd->map_pid_to_cmdline[pid];
|
2009-03-18 07:58:44 +00:00
|
|
|
if (map != NO_CMDLINE_MAP)
|
2017-05-03 10:11:14 +00:00
|
|
|
strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
|
2009-03-18 07:58:44 +00:00
|
|
|
else
|
|
|
|
strcpy(comm, "<...>");
|
2014-05-30 14:49:46 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void trace_find_cmdline(int pid, char comm[])
|
|
|
|
{
|
|
|
|
preempt_disable();
|
|
|
|
arch_spin_lock(&trace_cmdline_lock);
|
|
|
|
|
|
|
|
__trace_find_cmdline(pid, comm);
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2009-12-02 19:01:25 +00:00
|
|
|
arch_spin_unlock(&trace_cmdline_lock);
|
2009-05-26 15:28:02 +00:00
|
|
|
preempt_enable();
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
|
|
|
|
2017-06-27 02:01:55 +00:00
|
|
|
int trace_find_tgid(int pid)
|
|
|
|
{
|
|
|
|
if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return tgid_map[pid];
|
|
|
|
}
|
|
|
|
|
|
|
|
static int trace_save_tgid(struct task_struct *tsk)
|
|
|
|
{
|
2017-07-06 23:00:22 +00:00
|
|
|
/* treat recording of idle task as a success */
|
|
|
|
if (!tsk->pid)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
|
2017-06-27 02:01:55 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
tgid_map[tsk->pid] = tsk->tgid;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool tracing_record_taskinfo_skip(int flags)
|
|
|
|
{
|
|
|
|
if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
|
|
|
|
return true;
|
|
|
|
if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
|
|
|
|
return true;
|
|
|
|
if (!__this_cpu_read(trace_taskinfo_save))
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* tracing_record_taskinfo - record the task info of a task
|
|
|
|
*
|
2019-08-28 05:25:47 +00:00
|
|
|
* @task: task to record
|
|
|
|
* @flags: TRACE_RECORD_CMDLINE for recording comm
|
|
|
|
* TRACE_RECORD_TGID for recording tgid
|
2017-06-27 02:01:55 +00:00
|
|
|
*/
|
|
|
|
void tracing_record_taskinfo(struct task_struct *task, int flags)
|
|
|
|
{
|
2017-07-06 23:00:23 +00:00
|
|
|
bool done;
|
|
|
|
|
2017-06-27 02:01:55 +00:00
|
|
|
if (tracing_record_taskinfo_skip(flags))
|
|
|
|
return;
|
2017-07-06 23:00:23 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Record as much task information as possible. If some fail, continue
|
|
|
|
* to try to record the others.
|
|
|
|
*/
|
|
|
|
done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
|
|
|
|
done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
|
|
|
|
|
|
|
|
/* If recording any information failed, retry again soon. */
|
|
|
|
if (!done)
|
2017-06-27 02:01:55 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
__this_cpu_write(trace_taskinfo_save, false);
|
|
|
|
}
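/*
 * Minimal sketch of calling the helper above from probe context; the
 * wrapper is hypothetical. Both flags can be OR'd together to record the
 * comm and the tgid of a task in one call.
 */
static void example_record_current(void)
{
	tracing_record_taskinfo(current, TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID);
}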
|
|
|
|
|
|
|
|
/**
|
|
|
|
* tracing_record_taskinfo_sched_switch - record task info for sched_switch
|
|
|
|
*
|
2019-08-28 05:25:47 +00:00
|
|
|
* @prev: previous task during sched_switch
|
|
|
|
* @next: next task during sched_switch
|
|
|
|
* @flags: TRACE_RECORD_CMDLINE for recording comm
|
|
|
|
* TRACE_RECORD_TGID for recording tgid
|
2017-06-27 02:01:55 +00:00
|
|
|
*/
|
|
|
|
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
|
|
|
|
struct task_struct *next, int flags)
|
2008-05-12 19:20:42 +00:00
|
|
|
{
|
2017-07-06 23:00:23 +00:00
|
|
|
bool done;
|
|
|
|
|
2017-06-27 02:01:55 +00:00
|
|
|
if (tracing_record_taskinfo_skip(flags))
|
|
|
|
return;
|
|
|
|
|
2017-07-06 23:00:23 +00:00
|
|
|
/*
|
|
|
|
* Record as much task information as possible. If some fail, continue
|
|
|
|
* to try to record the others.
|
|
|
|
*/
|
|
|
|
done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
|
|
|
|
done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
|
|
|
|
done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
|
|
|
|
done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2017-07-06 23:00:23 +00:00
|
|
|
/* If recording any information failed, retry again soon. */
|
|
|
|
if (!done)
|
2012-10-11 16:14:25 +00:00
|
|
|
return;
|
|
|
|
|
2017-06-27 02:01:55 +00:00
|
|
|
__this_cpu_write(trace_taskinfo_save, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Helpers to record a specific task information */
|
|
|
|
void tracing_record_cmdline(struct task_struct *task)
|
|
|
|
{
|
|
|
|
tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
|
|
|
|
}
|
|
|
|
|
|
|
|
void tracing_record_tgid(struct task_struct *task)
|
|
|
|
{
|
|
|
|
tracing_record_taskinfo(task, TRACE_RECORD_TGID);
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
|
|
|
|
tracing: Move trace_handle_return() out of line
Currently trace_handle_return() looks like this:
static inline enum print_line_t trace_handle_return(struct trace_seq *s)
{
return trace_seq_has_overflowed(s) ?
TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
}
Where trace_seq_overflowed(s) is:
static inline bool trace_seq_has_overflowed(struct trace_seq *s)
{
return s->full || seq_buf_has_overflowed(&s->seq);
}
And seq_buf_has_overflowed(&s->seq) is:
static inline bool
seq_buf_has_overflowed(struct seq_buf *s)
{
return s->len > s->size;
}
Making trace_handle_return() into:
return (s->full || (s->seq->len > s->seq->size)) ?
TRACE_TYPE_PARTIAL_LINE :
TRACE_TYPE_HANDLED;
One would think this is not an issue to keep as an inline. But because this
is used in the TRACE_EVENT() macro, it is extended for every tracepoint in
the system. Taking a look at a single tracepoint x86_irq_vector (was the
first one I randomly chosen). As trace_handle_return is used in the
TRACE_EVENT() macro of trace_raw_output_##call() we disassemble
trace_raw_output_x86_irq_vector and do a diff:
- is the original
+ is the out-of-line code
I removed identical lines that were different just due to different
addresses.
--- /tmp/irq-vec-orig 2017-03-16 09:12:48.569384851 -0400
+++ /tmp/irq-vec-ool 2017-03-16 09:13:39.378153385 -0400
@@ -6,27 +6,23 @@
53 push %rbx
48 89 fb mov %rdi,%rbx
4c 8b a7 c0 20 00 00 mov 0x20c0(%rdi),%r12
e8 f7 72 13 00 callq ffffffff81155c80 <trace_raw_output_prep>
83 f8 01 cmp $0x1,%eax
74 05 je ffffffff8101e993 <trace_raw_output_x86_irq_vector+0x23>
5b pop %rbx
41 5c pop %r12
5d pop %rbp
c3 retq
41 8b 54 24 08 mov 0x8(%r12),%edx
- 48 8d bb 98 10 00 00 lea 0x1098(%rbx),%rdi
+ 48 81 c3 98 10 00 00 add $0x1098,%rbx
- 48 c7 c6 7b 8a a0 81 mov $0xffffffff81a08a7b,%rsi
+ 48 c7 c6 ab 8a a0 81 mov $0xffffffff81a08aab,%rsi
- e8 c5 85 13 00 callq ffffffff81156f70 <trace_seq_printf>
=== here's the start of the main difference ===
+ 48 89 df mov %rbx,%rdi
+ e8 62 7e 13 00 callq ffffffff81156810 <trace_seq_printf>
- 8b 93 b8 20 00 00 mov 0x20b8(%rbx),%edx
- 31 c0 xor %eax,%eax
- 85 d2 test %edx,%edx
- 75 11 jne ffffffff8101e9c8 <trace_raw_output_x86_irq_vector+0x58>
- 48 8b 83 a8 20 00 00 mov 0x20a8(%rbx),%rax
- 48 39 83 a0 20 00 00 cmp %rax,0x20a0(%rbx)
- 0f 93 c0 setae %al
+ 48 89 df mov %rbx,%rdi
+ e8 4a c5 12 00 callq ffffffff8114af00 <trace_handle_return>
5b pop %rbx
- 0f b6 c0 movzbl %al,%eax
=== end ===
41 5c pop %r12
5d pop %rbp
c3 retq
If you notice, the original has 22 bytes of text more than the out of line
version. As this is for every TRACE_EVENT() defined in the system, this can
become quite large.
text data bss dec hex filename
8690305 5450490 1298432 15439227 eb957b vmlinux-orig
8681725 5450490 1298432 15430647 eb73f7 vmlinux-handle
This change has a total of 8580 bytes in savings.
$ objdump -dr /tmp/vmlinux-orig | grep '^[0-9a-f]* <trace_raw_output' | wc -l
324
That's 324 tracepoints. But this does not include modules (which contain
many more tracepoints). For an allyesconfig build:
$ objdump -dr vmlinux-allyes-orig | grep '^[0-9a-f]* <trace_raw_output' | wc -l
1401
That's 1401 tracepoints giving us:
text data bss dec hex filename
137920629 140221067 53264384 331406080 13c0db00 vmlinux-allyes-orig
137827709 140221067 53264384 331313160 13bf7008 vmlinux-allyes-handle
92920 bytes in savings!!!
Link: http://lkml.kernel.org/r/20170315021431.13107-2-andi@firstfloor.org
Reported-by: Andi Kleen <andi@firstfloor.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
2017-03-16 15:01:06 +00:00
|
|
|
/*
|
|
|
|
* Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
|
|
|
|
* overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
|
|
|
|
* simplifies those functions and keeps them in sync.
|
|
|
|
*/
|
|
|
|
enum print_line_t trace_handle_return(struct trace_seq *s)
|
|
|
|
{
|
|
|
|
return trace_seq_has_overflowed(s) ?
|
|
|
|
TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(trace_handle_return);
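/*
 * Sketch of how an output routine is expected to use trace_handle_return():
 * write to the trace_seq, then let the helper turn the overflow state into
 * the right return value. The routine and the field printed are hypothetical.
 */
static enum print_line_t example_output(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "example: pid=%d\n", iter->ent->pid);
	return trace_handle_return(s);
}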
|
|
|
|
|
2008-09-16 18:56:41 +00:00
|
|
|
void
|
2019-05-25 16:57:59 +00:00
|
|
|
tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
|
|
|
|
unsigned long flags, int pc)
|
2008-05-12 19:20:42 +00:00
|
|
|
{
|
|
|
|
struct task_struct *tsk = current;
|
|
|
|
|
2008-09-30 03:02:42 +00:00
|
|
|
entry->preempt_count = pc & 0xff;
|
|
|
|
entry->pid = (tsk) ? tsk->pid : 0;
|
2019-05-25 16:57:59 +00:00
|
|
|
entry->type = type;
|
2008-09-30 03:02:42 +00:00
|
|
|
entry->flags =
|
2008-10-24 13:42:59 +00:00
|
|
|
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
|
2008-08-01 16:26:40 +00:00
|
|
|
(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
|
2008-10-24 13:42:59 +00:00
|
|
|
#else
|
|
|
|
TRACE_FLAG_IRQS_NOSUPPORT |
|
|
|
|
#endif
|
2016-03-18 15:28:04 +00:00
|
|
|
((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
|
2008-05-12 19:20:42 +00:00
|
|
|
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
|
2016-12-09 16:20:17 +00:00
|
|
|
((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
|
2013-10-04 15:28:26 +00:00
|
|
|
(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
|
|
|
|
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
2009-08-06 23:25:54 +00:00
|
|
|
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2009-09-02 18:17:06 +00:00
|
|
|
struct ring_buffer_event *
|
2019-12-13 18:58:57 +00:00
|
|
|
trace_buffer_lock_reserve(struct trace_buffer *buffer,
|
2009-09-02 18:17:06 +00:00
|
|
|
int type,
|
|
|
|
unsigned long len,
|
|
|
|
unsigned long flags, int pc)
|
tracing: Introduce trace_buffer_{lock_reserve,unlock_commit}
Impact: new API
These new functions do what previously was being open coded, reducing
the number of details ftrace plugin writers have to worry about.
It also standardizes the handling of stacktrace, userstacktrace and
other trace options we may introduce in the future.
With this patch, for instance, the blk tracer (and some others already
in the tree) can use the "userstacktrace" /d/tracing/trace_options
facility.
$ codiff /tmp/vmlinux.before /tmp/vmlinux.after
linux-2.6-tip/kernel/trace/trace.c:
trace_vprintk | -5
trace_graph_return | -22
trace_graph_entry | -26
trace_function | -45
__ftrace_trace_stack | -27
ftrace_trace_userstack | -29
tracing_sched_switch_trace | -66
tracing_stop | +1
trace_seq_to_user | -1
ftrace_trace_special | -63
ftrace_special | +1
tracing_sched_wakeup_trace | -70
tracing_reset_online_cpus | -1
13 functions changed, 2 bytes added, 355 bytes removed, diff: -353
linux-2.6-tip/block/blktrace.c:
__blk_add_trace | -58
1 function changed, 58 bytes removed, diff: -58
linux-2.6-tip/kernel/trace/trace.c:
trace_buffer_lock_reserve | +88
trace_buffer_unlock_commit | +86
2 functions changed, 174 bytes added, diff: +174
/tmp/vmlinux.after:
16 functions changed, 176 bytes added, 413 bytes removed, diff: -237
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Acked-by: Frédéric Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-02-05 18:14:13 +00:00
|
|
|
{
|
2016-11-23 16:29:58 +00:00
|
|
|
return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
|
2016-05-03 21:15:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
|
|
|
|
DEFINE_PER_CPU(int, trace_buffered_event_cnt);
|
|
|
|
static int trace_buffered_event_ref;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* trace_buffered_event_enable - enable buffering events
|
|
|
|
*
|
|
|
|
* When events are being filtered, it is quicker to use a temporary
|
|
|
|
* buffer to write the event data into if there's a likely chance
|
|
|
|
* that it will not be committed. The discard of the ring buffer
|
|
|
|
* is not as fast as committing, and is much slower than copying
|
|
|
|
* a commit.
|
|
|
|
*
|
|
|
|
* When an event is to be filtered, allocate per cpu buffers to
|
|
|
|
* write the event data into, and if the event is filtered and discarded
|
|
|
|
* it is simply dropped, otherwise, the entire data is to be committed
|
|
|
|
* in one shot.
|
|
|
|
*/
|
|
|
|
void trace_buffered_event_enable(void)
|
|
|
|
{
|
|
|
|
struct ring_buffer_event *event;
|
|
|
|
struct page *page;
|
|
|
|
int cpu;
|
tracing: Introduce trace_buffer_{lock_reserve,unlock_commit}
2009-02-05 18:14:13 +00:00
|
|
|
|
2016-05-03 21:15:43 +00:00
|
|
|
WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
|
|
|
|
|
|
|
|
if (trace_buffered_event_ref++)
|
|
|
|
return;
|
|
|
|
|
|
|
|
for_each_tracing_cpu(cpu) {
|
|
|
|
page = alloc_pages_node(cpu_to_node(cpu),
|
|
|
|
GFP_KERNEL | __GFP_NORETRY, 0);
|
|
|
|
if (!page)
|
|
|
|
goto failed;
|
|
|
|
|
|
|
|
event = page_address(page);
|
|
|
|
memset(event, 0, sizeof(*event));
|
|
|
|
|
|
|
|
per_cpu(trace_buffered_event, cpu) = event;
|
|
|
|
|
|
|
|
preempt_disable();
|
|
|
|
if (cpu == smp_processor_id() &&
|
|
|
|
this_cpu_read(trace_buffered_event) !=
|
|
|
|
per_cpu(trace_buffered_event, cpu))
|
|
|
|
WARN_ON_ONCE(1);
|
|
|
|
preempt_enable();
|
tracing: Introduce trace_buffer_{lock_reserve,unlock_commit}
2009-02-05 18:14:13 +00:00
|
|
|
}
|
|
|
|
|
2016-05-03 21:15:43 +00:00
|
|
|
return;
|
|
|
|
failed:
|
|
|
|
trace_buffered_event_disable();
|
|
|
|
}
|
|
|
|
|
|
|
|
static void enable_trace_buffered_event(void *data)
|
|
|
|
{
|
|
|
|
/* Probably not needed, but do it anyway */
|
|
|
|
smp_rmb();
|
|
|
|
this_cpu_dec(trace_buffered_event_cnt);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void disable_trace_buffered_event(void *data)
|
|
|
|
{
|
|
|
|
this_cpu_inc(trace_buffered_event_cnt);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* trace_buffered_event_disable - disable buffering events
|
|
|
|
*
|
|
|
|
* When a filter is removed, it is faster to not use the buffered
|
|
|
|
* events, and to commit directly into the ring buffer. Free up
|
|
|
|
* the temp buffers when there are no more users. This requires
|
|
|
|
* special synchronization with current events.
|
|
|
|
*/
|
|
|
|
void trace_buffered_event_disable(void)
|
|
|
|
{
|
|
|
|
int cpu;
|
|
|
|
|
|
|
|
WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
|
|
|
|
|
|
|
|
if (WARN_ON_ONCE(!trace_buffered_event_ref))
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (--trace_buffered_event_ref)
|
|
|
|
return;
|
|
|
|
|
|
|
|
preempt_disable();
|
|
|
|
/* For each CPU, set the buffer as used. */
|
|
|
|
smp_call_function_many(tracing_buffer_mask,
|
|
|
|
disable_trace_buffered_event, NULL, 1);
|
|
|
|
preempt_enable();
|
|
|
|
|
|
|
|
/* Wait for all current users to finish */
|
2018-11-07 02:44:52 +00:00
|
|
|
synchronize_rcu();
|
2016-05-03 21:15:43 +00:00
|
|
|
|
|
|
|
for_each_tracing_cpu(cpu) {
|
|
|
|
free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
|
|
|
|
per_cpu(trace_buffered_event, cpu) = NULL;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Make sure trace_buffered_event is NULL before clearing
|
|
|
|
* trace_buffered_event_cnt.
|
|
|
|
*/
|
|
|
|
smp_wmb();
|
|
|
|
|
|
|
|
preempt_disable();
|
|
|
|
/* Do the work on each cpu */
|
|
|
|
smp_call_function_many(tracing_buffer_mask,
|
|
|
|
enable_trace_buffered_event, NULL, 1);
|
|
|
|
preempt_enable();
|
tracing: Introduce trace_buffer_{lock_reserve,unlock_commit}
2009-02-05 18:14:13 +00:00
|
|
|
}
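/*
 * Sketch of the intended pairing of the two helpers above around filter
 * setup; the filter install/remove steps are hypothetical. Both helpers
 * must be called with event_mutex held, as their WARN_ON_ONCE() checks
 * document.
 */
static void example_filter_update(bool install)
{
	mutex_lock(&event_mutex);
	if (install) {
		trace_buffered_event_enable();
		/* ... install the event filter ... */
	} else {
		/* ... remove the event filter ... */
		trace_buffered_event_disable();
	}
	mutex_unlock(&event_mutex);
}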
|
|
|
|
|
2019-12-13 18:58:57 +00:00
|
|
|
static struct trace_buffer *temp_buffer;
|
2014-03-26 03:39:41 +00:00
|
|
|
|
2012-08-02 14:32:10 +00:00
|
|
|
struct ring_buffer_event *
|
2019-12-13 18:58:57 +00:00
|
|
|
trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
|
2015-05-05 14:09:53 +00:00
|
|
|
struct trace_event_file *trace_file,
|
2012-08-02 14:32:10 +00:00
|
|
|
int type, unsigned long len,
|
|
|
|
unsigned long flags, int pc)
|
|
|
|
{
|
2014-03-26 03:39:41 +00:00
|
|
|
struct ring_buffer_event *entry;
|
2016-05-03 21:15:43 +00:00
|
|
|
int val;
|
2014-03-26 03:39:41 +00:00
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
*current_rb = trace_file->tr->array_buffer.buffer;
|
2016-05-03 21:15:43 +00:00
|
|
|
|
2018-01-16 02:51:39 +00:00
|
|
|
if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
|
2016-05-03 21:15:43 +00:00
|
|
|
(EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
|
|
|
|
(entry = this_cpu_read(trace_buffered_event))) {
|
|
|
|
/* Try to use the per cpu buffer first */
|
|
|
|
val = this_cpu_inc_return(trace_buffered_event_cnt);
|
|
|
|
if (val == 1) {
|
|
|
|
trace_event_setup(entry, type, flags, pc);
|
|
|
|
entry->array[0] = len;
|
|
|
|
return entry;
|
|
|
|
}
|
|
|
|
this_cpu_dec(trace_buffered_event_cnt);
|
|
|
|
}
|
|
|
|
|
2016-11-23 16:29:58 +00:00
|
|
|
entry = __trace_buffer_lock_reserve(*current_rb,
|
|
|
|
type, len, flags, pc);
|
2014-03-26 03:39:41 +00:00
|
|
|
/*
|
|
|
|
* If tracing is off, but we have triggers enabled
|
|
|
|
* we still need to look at the event data. Use the temp_buffer
|
|
|
|
 * to store the trace event for the trigger to use. It's recursive
|
|
|
|
* safe and will not be recorded anywhere.
|
|
|
|
*/
|
2015-05-13 19:12:33 +00:00
|
|
|
if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
|
2014-03-26 03:39:41 +00:00
|
|
|
*current_rb = temp_buffer;
|
2016-11-23 16:29:58 +00:00
|
|
|
entry = __trace_buffer_lock_reserve(*current_rb,
|
|
|
|
type, len, flags, pc);
|
2014-03-26 03:39:41 +00:00
|
|
|
}
|
|
|
|
return entry;
|
2012-08-02 14:32:10 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
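/*
 * Schematic of the reserve/fill/commit flow driven through
 * trace_event_buffer_lock_reserve(); the caller and the event layout are
 * hypothetical, but only functions defined or used in this file are
 * called.
 */
static void example_emit(struct trace_event_file *trace_file, int type,
			 unsigned long flags, int pc)
{
	struct trace_array *tr = trace_file->tr;
	struct trace_buffer *buffer;
	struct ring_buffer_event *event;
	struct trace_entry *entry;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file, type,
						sizeof(*entry), flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	/* ... fill in the event-specific fields after the common header ... */
	trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
}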
|
|
|
|
|
2016-11-23 20:52:45 +00:00
|
|
|
static DEFINE_SPINLOCK(tracepoint_iter_lock);
|
|
|
|
static DEFINE_MUTEX(tracepoint_printk_mutex);
|
|
|
|
|
|
|
|
static void output_printk(struct trace_event_buffer *fbuffer)
|
|
|
|
{
|
|
|
|
struct trace_event_call *event_call;
|
2020-01-10 16:05:18 +00:00
|
|
|
struct trace_event_file *file;
|
2016-11-23 20:52:45 +00:00
|
|
|
struct trace_event *event;
|
|
|
|
unsigned long flags;
|
|
|
|
struct trace_iterator *iter = tracepoint_print_iter;
|
|
|
|
|
|
|
|
/* We should never get here if iter is NULL */
|
|
|
|
if (WARN_ON_ONCE(!iter))
|
|
|
|
return;
|
|
|
|
|
|
|
|
event_call = fbuffer->trace_file->event_call;
|
|
|
|
if (!event_call || !event_call->event.funcs ||
|
|
|
|
!event_call->event.funcs->trace)
|
|
|
|
return;
|
|
|
|
|
2020-01-10 16:05:18 +00:00
|
|
|
file = fbuffer->trace_file;
|
|
|
|
if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
|
|
|
|
(unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
|
|
|
|
!filter_match_preds(file->filter, fbuffer->entry)))
|
|
|
|
return;
|
|
|
|
|
2016-11-23 20:52:45 +00:00
|
|
|
event = &fbuffer->trace_file->event_call->event;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&tracepoint_iter_lock, flags);
|
|
|
|
trace_seq_init(&iter->seq);
|
|
|
|
iter->ent = fbuffer->entry;
|
|
|
|
event_call->event.funcs->trace(iter, 0, event);
|
|
|
|
trace_seq_putc(&iter->seq, 0);
|
|
|
|
printk("%s", iter->seq.buffer);
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
int tracepoint_printk_sysctl(struct ctl_table *table, int write,
|
2020-04-24 06:43:38 +00:00
|
|
|
void *buffer, size_t *lenp,
|
2016-11-23 20:52:45 +00:00
|
|
|
loff_t *ppos)
|
|
|
|
{
|
|
|
|
int save_tracepoint_printk;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
mutex_lock(&tracepoint_printk_mutex);
|
|
|
|
save_tracepoint_printk = tracepoint_printk;
|
|
|
|
|
|
|
|
ret = proc_dointvec(table, write, buffer, lenp, ppos);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This will force exiting early, as tracepoint_printk
|
|
|
|
 * is always zero when tracepoint_print_iter is not allocated
|
|
|
|
*/
|
|
|
|
if (!tracepoint_print_iter)
|
|
|
|
tracepoint_printk = 0;
|
|
|
|
|
|
|
|
if (save_tracepoint_printk == tracepoint_printk)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (tracepoint_printk)
|
|
|
|
static_key_enable(&tracepoint_printk_key.key);
|
|
|
|
else
|
|
|
|
static_key_disable(&tracepoint_printk_key.key);
|
|
|
|
|
|
|
|
out:
|
|
|
|
mutex_unlock(&tracepoint_printk_mutex);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
|
|
|
|
{
|
|
|
|
if (static_key_false(&tracepoint_printk_key.key))
|
|
|
|
output_printk(fbuffer);
|
|
|
|
|
2020-01-10 16:05:31 +00:00
|
|
|
event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
|
2016-11-23 20:52:45 +00:00
|
|
|
fbuffer->event, fbuffer->entry,
|
2020-01-10 16:05:31 +00:00
|
|
|
fbuffer->flags, fbuffer->pc, fbuffer->regs);
|
2016-11-23 20:52:45 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
|
|
|
|
|
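
/*
 * Usage sketch: tracepoint_printk_sysctl() above is the handler behind the
 * "kernel.tracepoint_printk" sysctl (the /proc path below assumes the usual
 * procfs wiring of that sysctl). Writing 1 enables tracepoint_printk_key so
 * that trace_event_buffer_commit() above also routes each event through
 * output_printk():
 *
 *	echo 1 > /proc/sys/kernel/tracepoint_printk
 *	echo 0 > /proc/sys/kernel/tracepoint_printk
 */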

/*
 * Skip 3:
 *
 *   trace_buffer_unlock_commit_regs()
 *   trace_event_buffer_commit()
 *   trace_event_raw_event_xxx()
 */
# define STACK_SKIP 3

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct trace_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	/*
	 * If regs is not set, then skip the necessary functions.
	 * Note, we can still get here via blktrace, wakeup tracer
	 * and mmiotrace, but that's ok if they lose a function or
	 * two. They are not that meaningful.
	 */
	ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}

/*
 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
 */
void
trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
				   struct ring_buffer_event *event)
{
	__buffer_unlock_commit(buffer, event);
}

static void
trace_process_export(struct trace_export *export,
		     struct ring_buffer_event *event)
{
	struct trace_entry *entry;
	unsigned int size = 0;

	entry = ring_buffer_event_data(event);
	size = ring_buffer_event_length(event);
	export->write(export, entry, size);
}

static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);

static inline void ftrace_exports_enable(void)
{
	static_branch_enable(&ftrace_exports_enabled);
}

static inline void ftrace_exports_disable(void)
{
	static_branch_disable(&ftrace_exports_enabled);
}

static void ftrace_exports(struct ring_buffer_event *event)
{
	struct trace_export *export;

	preempt_disable_notrace();

	export = rcu_dereference_raw_check(ftrace_exports_list);
	while (export) {
		trace_process_export(export, event);
		export = rcu_dereference_raw_check(export->next);
	}

	preempt_enable_notrace();
}

static inline void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
	rcu_assign_pointer(export->next, *list);
	/*
	 * We are entering export into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the export->next pointer is valid before another CPU sees
	 * the export pointer included into the list.
	 */
	rcu_assign_pointer(*list, export);
}

static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
	struct trace_export **p;

	for (p = list; *p != NULL; p = &(*p)->next)
		if (*p == export)
			break;

	if (*p != export)
		return -1;

	rcu_assign_pointer(*p, (*p)->next);

	return 0;
}

static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	if (*list == NULL)
		ftrace_exports_enable();

	add_trace_export(list, export);
}

static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	int ret;

	ret = rm_trace_export(list, export);
	if (*list == NULL)
		ftrace_exports_disable();

	return ret;
}

int register_ftrace_export(struct trace_export *export)
{
	if (WARN_ON_ONCE(!export->write))
		return -1;

	mutex_lock(&ftrace_export_lock);

	add_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

int unregister_ftrace_export(struct trace_export *export)
{
	int ret;

	mutex_lock(&ftrace_export_lock);

	ret = rm_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);
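
/*
 * Usage sketch for the export hooks above: a client (typically a module)
 * supplies a struct trace_export whose ->write() callback is handed each raw
 * trace entry and its length by trace_process_export(). The callback
 * prototype is assumed to match struct trace_export in include/linux/trace.h;
 * the names below are illustrative only.
 */
#if 0	/* illustrative sketch, not built */
static void my_export_write(struct trace_export *export, const void *entry,
			    unsigned int size)
{
	/* e.g. copy the raw entry out to an external transport */
}

static struct trace_export my_export = {
	.write	= my_export_write,
};

static int __init my_export_init(void)
{
	if (register_ftrace_export(&my_export))
		return -EINVAL;
	return 0;
}

static void __exit my_export_exit(void)
{
	unregister_ftrace_export(&my_export);
}
#endif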

void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct trace_event_call *call = &event_function;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					    flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event)) {
		if (static_branch_unlikely(&ftrace_exports_enabled))
			ftrace_exports(event);
		__buffer_unlock_commit(buffer, event);
	}
}

#ifdef CONFIG_STACKTRACE

/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
#define FTRACE_KSTACK_NESTING	4

#define FTRACE_KSTACK_ENTRIES	(PAGE_SIZE / FTRACE_KSTACK_NESTING)

struct ftrace_stack {
	unsigned long		calls[FTRACE_KSTACK_ENTRIES];
};

struct ftrace_stacks {
	struct ftrace_stack	stacks[FTRACE_KSTACK_NESTING];
};

static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct trace_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct trace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	unsigned int size, nr_entries;
	struct ftrace_stack *fstack;
	struct stack_entry *entry;
	int stackidx;

	/*
	 * Add one, for this function and the call to save_stack_trace()
	 * If regs is set, then these functions will not be in the way.
	 */
#ifndef CONFIG_UNWINDER_ORC
	if (!regs)
		skip++;
#endif

	preempt_disable_notrace();

	stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;

	/* This should never happen. If it does, yell once and skip */
	if (WARN_ON_ONCE(stackidx > FTRACE_KSTACK_NESTING))
		goto out;

	/*
	 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
	 * interrupt will either see the value pre increment or post
	 * increment. If the interrupt happens pre increment it will have
	 * restored the counter when it returns. We just need a barrier to
	 * keep gcc from moving things around.
	 */
	barrier();

	fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
	size = ARRAY_SIZE(fstack->calls);

	if (regs) {
		nr_entries = stack_trace_save_regs(regs, fstack->calls,
						   size, skip);
	} else {
		nr_entries = stack_trace_save(fstack->calls, size, skip);
	}

	size = nr_entries * sizeof(unsigned long);
	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
					    sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memcpy(&entry->caller, fstack->calls, size);
	entry->size = nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();
}

static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	struct trace_buffer *buffer = tr->array_buffer.buffer;

	if (rcu_is_watching()) {
		__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
		return;
	}

	/*
	 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
	 * but if the above rcu_is_watching() failed, then the NMI
	 * triggered someplace critical, and rcu_irq_enter() should
	 * not be called from NMI.
	 */
	if (unlikely(in_nmi()))
		return;

	rcu_irq_enter_irqson();
	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
	rcu_irq_exit_irqson();
}

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

#ifndef CONFIG_UNWINDER_ORC
	/* Skip 1 to skip this function. */
	skip++;
#endif
	__ftrace_trace_stack(global_trace.array_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}
EXPORT_SYMBOL_GPL(trace_dump_stack);
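
/*
 * Usage sketch: trace_dump_stack() can be dropped into a suspect code path to
 * record the current backtrace in the ring buffer rather than on the console;
 * @skip trims helper frames, so 0 starts at the immediate caller. The
 * condition and function name below are hypothetical.
 */
#if 0	/* illustrative sketch, not built */
static void my_driver_helper(bool unexpected)
{
	if (unexpected)
		trace_dump_stack(0);	/* note how we got here in the trace */
}
#endif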

#ifdef CONFIG_USER_STACKTRACE_SUPPORT
static DEFINE_PER_CPU(int, user_stack_count);

static void
ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;

	if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * The save user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					    sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid = current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}
#else /* CONFIG_USER_STACKTRACE_SUPPORT */
static void ftrace_trace_userstack(struct trace_buffer *buffer,
				   unsigned long flags, int pc)
{
}
#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */

#endif /* CONFIG_STACKTRACE */
/* created for use with alloc_percpu */
struct trace_buffer_struct {
	int nesting;
	char buffer[4][TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;

/*
 * This allows for lockless recording. If we're nested too deeply, then
 * this returns NULL.
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);

	if (!buffer || buffer->nesting >= 4)
		return NULL;

	buffer->nesting++;

	/* Interrupts must see nesting incremented before we use the buffer */
	barrier();
	return &buffer->buffer[buffer->nesting - 1][0];
}

static void put_trace_buf(void)
{
	/* Don't let the decrement of nesting leak before this */
	barrier();
	this_cpu_dec(trace_percpu_buffer->nesting);
}

static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;

	if (trace_percpu_buffer)
		return 0;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
		return -ENOMEM;

	trace_percpu_buffer = buffers;
	return 0;
}

static int buffers_allocated;

void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warn("\n");
	pr_warn("**********************************************************\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warn("** unsafe for production use.                           **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** If you see this message and you are not debugging    **\n");
	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**********************************************************\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.array_buffer.buffer)
		tracing_start_cmdline_record();
}
EXPORT_SYMBOL_GPL(trace_printk_init_buffers);

void trace_printk_start_comm(void)
{
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
		return;
	tracing_start_cmdline_record();
}

static void trace_printk_start_stop_comm(int enabled)
{
	if (!buffers_allocated)
		return;

	if (enabled)
		tracing_start_cmdline_record();
	else
		tracing_stop_cmdline_record();
}
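
/*
 * Usage sketch: with the per-cpu buffers above allocated, trace_printk()
 * behaves like printk() but records into the ring buffer via the lockless
 * get_trace_buf()/put_trace_buf() pairing, so it is usable from hot paths,
 * interrupt and NMI context. The function and values below are illustrative.
 */
#if 0	/* illustrative sketch, not built */
static void my_hot_path(int cpu, u64 delta_ns)
{
	trace_printk("cpu=%d delta=%llu ns\n", cpu, delta_ns);
}
#endif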

/**
 * trace_vbprintk - write binary msg to tracing buffer
 * @ip:    The address of the caller
 * @fmt:   The string format to write to the buffer
 * @args:  Arguments for @fmt
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out_nobuffer;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out_put;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->array_buffer.buffer;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					    flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	}

out:
	ring_buffer_nest_end(buffer);
out_put:
	put_trace_buf();

out_nobuffer:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
2018-03-08 20:58:43 +00:00
|
|
|
__printf(3, 0)
|
tracing: Consolidate max_tr into main trace_array structure
Currently, the way the latency tracers and snapshot feature works
is to have a separate trace_array called "max_tr" that holds the
snapshot buffer. For latency tracers, this snapshot buffer is used
to swap the running buffer with this buffer to save the current max
latency.
The only items needed for the max_tr are really just a copy of the buffer
itself, the per_cpu data pointers, the time_start timestamp that states
when the max latency was triggered, and the cpu that the max latency
was triggered on. All other fields in trace_array are unused by the
max_tr, making the max_tr mostly bloat.
This change removes the max_tr completely, and adds a new structure
called trace_buffer, that holds the buffer pointer, the per_cpu data
pointers, the time_start timestamp, and the cpu where the latency occurred.
The trace_array now has two trace_buffers: one for the normal trace and
one for the max trace or snapshot. By doing this, not only do we remove
the bloat from the max_tr, but the instances of traces can now use
their own snapshot feature, instead of only the top level global_trace
having the snapshot feature and latency tracers for itself.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2013-03-05 14:24:35 +00:00
|
|
|
static int
|
2019-12-13 18:58:57 +00:00
|
|
|
__trace_array_vprintk(struct trace_buffer *buffer,
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
unsigned long ip, const char *fmt, va_list args)
|
2009-03-12 17:24:49 +00:00
|
|
|
{
|
2015-05-05 15:45:27 +00:00
|
|
|
struct trace_event_call *call = &event_print;
|
2009-03-12 17:24:49 +00:00
|
|
|
struct ring_buffer_event *event;
|
tracing: Add percpu buffers for trace_printk()
2011-09-22 18:01:55 +00:00
|
|
|
int len = 0, size, pc;
|
2009-03-12 17:24:49 +00:00
|
|
|
struct print_entry *entry;
|
tracing: Add percpu buffers for trace_printk()
2011-09-22 18:01:55 +00:00
|
|
|
unsigned long flags;
|
|
|
|
char *tbuffer;
|
2009-03-12 17:24:49 +00:00
|
|
|
|
|
|
|
if (tracing_disabled || tracing_selftest_running)
|
|
|
|
return 0;
|
|
|
|
|
tracing: Add percpu buffers for trace_printk()
2011-09-22 18:01:55 +00:00
|
|
|
/* Don't pollute graph traces with trace_vprintk internals */
|
|
|
|
pause_graph_tracing();
|
|
|
|
|
2009-03-12 17:24:49 +00:00
|
|
|
pc = preempt_count();
|
|
|
|
preempt_disable_notrace();
|
|
|
|
|
tracing: Add percpu buffers for trace_printk()
2011-09-22 18:01:55 +00:00
|
|
|
|
|
|
|
tbuffer = get_trace_buf();
|
|
|
|
if (!tbuffer) {
|
|
|
|
len = 0;
|
2016-05-26 19:00:33 +00:00
|
|
|
goto out_nobuffer;
|
tracing: Add percpu buffers for trace_printk()
2011-09-22 18:01:55 +00:00
|
|
|
}
|
2009-03-12 17:24:49 +00:00
|
|
|
|
2014-11-27 15:57:52 +00:00
|
|
|
len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
|
2009-03-12 17:24:49 +00:00
|
|
|
|
tracing: Add percpu buffers for trace_printk()
2011-09-22 18:01:55 +00:00
|
|
|
local_save_flags(flags);
|
2009-03-12 17:24:49 +00:00
|
|
|
size = sizeof(*entry) + len + 1;
|
2020-01-16 13:20:18 +00:00
|
|
|
ring_buffer_nest_start(buffer);
|
2016-11-23 16:29:58 +00:00
|
|
|
event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
|
|
|
|
flags, pc);
|
2009-03-12 17:24:49 +00:00
|
|
|
if (!event)
|
tracing: Add percpu buffers for trace_printk()
2011-09-22 18:01:55 +00:00
|
|
|
goto out;
|
2009-03-12 17:24:49 +00:00
|
|
|
entry = ring_buffer_event_data(event);
|
2009-11-16 19:56:13 +00:00
|
|
|
entry->ip = ip;
|
2009-03-12 17:24:49 +00:00
|
|
|
|
2014-11-27 15:57:52 +00:00
|
|
|
memcpy(&entry->buf, tbuffer, len + 1);
|
2013-10-24 13:34:17 +00:00
|
|
|
if (!call_filter_check_discard(call, entry, buffer, event)) {
|
2012-10-11 16:14:25 +00:00
|
|
|
__buffer_unlock_commit(buffer, event);
|
2015-09-30 15:45:22 +00:00
|
|
|
ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
|
2010-01-06 22:27:11 +00:00
|
|
|
}
|
2016-05-26 19:00:33 +00:00
|
|
|
|
|
|
|
out:
|
2020-01-16 13:20:18 +00:00
|
|
|
ring_buffer_nest_end(buffer);
|
2016-05-26 19:00:33 +00:00
|
|
|
put_trace_buf();
|
|
|
|
|
|
|
|
out_nobuffer:
|
2009-03-12 17:24:49 +00:00
|
|
|
preempt_enable_notrace();
|
tracing: Add percpu buffers for trace_printk()
2011-09-22 18:01:55 +00:00
|
|
|
unpause_graph_tracing();
|
2009-03-12 17:24:49 +00:00
|
|
|
|
|
|
|
return len;
|
|
|
|
}
|
2009-09-03 23:11:07 +00:00
|
|
|
|
2018-03-08 20:58:43 +00:00
|
|
|
__printf(3, 0)
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
int trace_array_vprintk(struct trace_array *tr,
|
|
|
|
unsigned long ip, const char *fmt, va_list args)
|
|
|
|
{
|
2020-01-09 23:53:48 +00:00
|
|
|
return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
}
|
|
|
|
|
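The consolidation described in the max_tr annotations above boils down to a small descriptor that the code in this file reaches through iter->array_buffer and tr->array_buffer. A rough sketch, with field names inferred from that description and from the accesses visible here (what the message calls trace_buffer appears in this file under the later array_buffer naming; the real definition lives in the tracing headers and may differ):

struct array_buffer {
	struct trace_buffer		*buffer;	/* the ring buffer itself */
	struct trace_array_cpu __percpu	*data;		/* per-cpu bookkeeping */
	u64				time_start;	/* when the max latency was hit */
	int				cpu;		/* cpu that hit the max latency */
};

struct trace_array {
	/* ... */
	struct array_buffer	array_buffer;	/* normal trace */
#ifdef CONFIG_TRACER_MAX_TRACE
	struct array_buffer	max_buffer;	/* snapshot / max-latency copy */
#endif
	/* ... */
};

With two such buffers per trace_array, every instance can swap its own snapshot, instead of only the top-level global_trace having that ability.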
2020-08-06 16:46:49 +00:00
|
|
|
/**
|
|
|
|
* trace_array_printk - Print a message to a specific instance
|
|
|
|
* @tr: The instance trace_array descriptor
|
|
|
|
* @ip: The instruction pointer that this is called from.
|
|
|
|
* @fmt: The format to print (printf format)
|
|
|
|
*
|
|
|
|
* If a subsystem sets up its own instance, they have the right to
|
|
|
|
* printk strings into their tracing instance buffer using this
|
|
|
|
* function. Note, this function will not write into the top level
|
|
|
|
* buffer (use trace_printk() for that), as the top level
|
|
|
|
* buffer should only contain events that can be individually disabled.
|
|
|
|
* trace_printk() is only used for debugging a kernel, and should not
|
|
|
|
* ever be incorporated into normal use.
|
|
|
|
*
|
|
|
|
* trace_array_printk() can be used, as it will not add noise to the
|
|
|
|
* top level tracing buffer.
|
|
|
|
*
|
|
|
|
* Note, trace_array_init_printk() must be called on @tr before this
|
|
|
|
* can be used.
|
|
|
|
*/
|
2018-03-08 20:58:43 +00:00
|
|
|
__printf(3, 0)
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
int trace_array_printk(struct trace_array *tr,
|
|
|
|
unsigned long ip, const char *fmt, ...)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
va_list ap;
|
|
|
|
|
2019-08-14 17:55:25 +00:00
|
|
|
if (!tr)
|
|
|
|
return -ENOENT;
|
|
|
|
|
2020-06-16 18:53:55 +00:00
|
|
|
/* This is only allowed for created instances */
|
|
|
|
if (tr == &global_trace)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!(tr->trace_flags & TRACE_ITER_PRINTK))
|
|
|
|
return 0;
|
|
|
|
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
va_start(ap, fmt);
|
|
|
|
ret = trace_array_vprintk(tr, ip, fmt, ap);
|
|
|
|
va_end(ap);
|
|
|
|
return ret;
|
|
|
|
}
|
2019-03-20 18:28:51 +00:00
|
|
|
EXPORT_SYMBOL_GPL(trace_array_printk);
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
|
2020-08-06 16:46:49 +00:00
|
|
|
/**
|
|
|
|
* trace_array_init_printk - Initialize buffers for trace_array_printk()
|
|
|
|
* @tr: The trace array to initialize the buffers for
|
|
|
|
*
|
|
|
|
* As trace_array_printk() only writes into instances, they are OK to
|
|
|
|
* have in the kernel (unlike trace_printk()). This needs to be called
|
|
|
|
* before trace_array_printk() can be used on a trace_array.
|
|
|
|
*/
|
|
|
|
int trace_array_init_printk(struct trace_array *tr)
|
|
|
|
{
|
|
|
|
if (!tr)
|
|
|
|
return -ENOENT;
|
|
|
|
|
|
|
|
/* This is only allowed for created instances */
|
|
|
|
if (tr == &global_trace)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return alloc_percpu_trace_buffer();
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(trace_array_init_printk);
|
|
|
|
|
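Putting trace_array_init_printk() and trace_array_printk() together, a subsystem-side usage could look roughly as follows. This is hypothetical glue, not code from this file: trace_array_get_by_name() is assumed to be available for creating or looking up the instance, and error handling is trimmed.

static int my_subsys_trace_init(void)
{
	struct trace_array *tr;
	int ret;

	tr = trace_array_get_by_name("my_subsys");	/* assumed helper */
	if (!tr)
		return -ENOMEM;

	/* Allocate the per-cpu printk buffers before the first message. */
	ret = trace_array_init_printk(tr);
	if (ret)
		return ret;

	trace_array_printk(tr, _THIS_IP_, "my_subsys instance ready (%d cpus)\n",
			   num_online_cpus());
	return 0;
}

The messages land only in the my_subsys instance buffer, keeping the top-level buffer free of noise, as the kernel-doc above requires.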
2018-03-08 20:58:43 +00:00
|
|
|
__printf(3, 4)
|
2019-12-13 18:58:57 +00:00
|
|
|
int trace_array_printk_buf(struct trace_buffer *buffer,
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
unsigned long ip, const char *fmt, ...)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
va_list ap;
|
|
|
|
|
2015-09-30 13:42:05 +00:00
|
|
|
if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
va_start(ap, fmt);
|
|
|
|
ret = __trace_array_vprintk(buffer, ip, fmt, ap);
|
|
|
|
va_end(ap);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-03-08 20:58:43 +00:00
|
|
|
__printf(2, 0)
|
2009-09-03 23:11:07 +00:00
|
|
|
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
|
|
|
|
{
|
2009-10-09 05:41:35 +00:00
|
|
|
return trace_array_vprintk(&global_trace, ip, fmt, args);
|
2009-09-03 23:11:07 +00:00
|
|
|
}
|
2009-03-06 16:21:49 +00:00
|
|
|
EXPORT_SYMBOL_GPL(trace_vprintk);
|
|
|
|
|
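For comparison, trace_vprintk() above is what the plain trace_printk() debugging macro eventually funnels into for the top-level buffer (constant formats take the trace_bprintk() route shown earlier). A hedged usage sketch, for debugging only:

static void my_debug_hook(int cpu, u64 delta_ns)
{
	/* Goes to the global trace buffer; never ship this in production code. */
	trace_printk("cpu %d saw a delta of %llu ns\n", cpu, delta_ns);
}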
2008-11-12 11:59:32 +00:00
|
|
|
static void trace_iterator_increment(struct trace_iterator *iter)
|
2008-09-03 21:42:51 +00:00
|
|
|
{
|
2012-06-28 00:46:14 +00:00
|
|
|
struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
|
|
|
|
|
2008-09-03 21:42:51 +00:00
|
|
|
iter->idx++;
|
2012-06-28 00:46:14 +00:00
|
|
|
if (buf_iter)
|
2020-03-17 21:32:25 +00:00
|
|
|
ring_buffer_iter_advance(buf_iter);
|
2008-09-03 21:42:51 +00:00
|
|
|
}
|
|
|
|
|
2008-05-12 19:20:51 +00:00
|
|
|
static struct trace_entry *
|
2010-03-31 23:49:26 +00:00
|
|
|
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
|
|
|
|
unsigned long *lost_events)
|
2008-08-01 16:26:41 +00:00
|
|
|
{
|
2008-09-30 03:02:41 +00:00
|
|
|
struct ring_buffer_event *event;
|
2012-06-28 00:46:14 +00:00
|
|
|
struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
|
2008-08-01 16:26:41 +00:00
|
|
|
|
2020-03-17 21:32:32 +00:00
|
|
|
if (buf_iter) {
|
2008-10-01 04:29:53 +00:00
|
|
|
event = ring_buffer_iter_peek(buf_iter, ts);
|
2020-03-17 21:32:32 +00:00
|
|
|
if (lost_events)
|
|
|
|
*lost_events = ring_buffer_iter_dropped(buf_iter) ?
|
|
|
|
(unsigned long)-1 : 0;
|
|
|
|
} else {
|
2020-01-09 23:53:48 +00:00
|
|
|
event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
|
2010-03-31 23:49:26 +00:00
|
|
|
lost_events);
|
2020-03-17 21:32:32 +00:00
|
|
|
}
|
2008-10-01 04:29:53 +00:00
|
|
|
|
2011-07-14 20:36:53 +00:00
|
|
|
if (event) {
|
|
|
|
iter->ent_size = ring_buffer_event_length(event);
|
|
|
|
return ring_buffer_event_data(event);
|
|
|
|
}
|
|
|
|
iter->ent_size = 0;
|
|
|
|
return NULL;
|
2008-08-01 16:26:41 +00:00
|
|
|
}
|
2008-10-01 04:29:53 +00:00
|
|
|
|
2008-08-01 16:26:41 +00:00
|
|
|
static struct trace_entry *
|
2010-03-31 23:49:26 +00:00
|
|
|
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
|
|
|
|
unsigned long *missing_events, u64 *ent_ts)
|
2008-05-12 19:20:42 +00:00
|
|
|
{
|
2019-12-13 18:58:57 +00:00
|
|
|
struct trace_buffer *buffer = iter->array_buffer->buffer;
|
2008-05-12 19:20:42 +00:00
|
|
|
struct trace_entry *ent, *next = NULL;
|
2010-04-05 09:11:05 +00:00
|
|
|
unsigned long lost_events = 0, next_lost = 0;
|
tracing/core: introduce per cpu tracing files
Impact: split up tracing output per cpu
Currently, in the tracing debugfs directory, three files are
available to let the user extract the trace output:
- trace is an iterator through the ring-buffer. It's a reader
but not a consumer; it doesn't block when no more traces are
available.
- trace, pretty similar to the former, except that it adds more
information such as preempt count, irq flags, ...
- trace_pipe is a reader and a consumer; it will also block,
waiting for traces if necessary (heh, yes, it's a pipe).
The traces coming from different cpus are currently mixed up
inside these files. Sometimes that muddles the information,
sometimes it's useful, depending on what the tracer captures.
The tracing_cpumask file is useful to filter the output and
select only the traces captured on a custom-defined set of cpus.
But it is still not powerful enough to extract one trace buffer
per cpu at the same time.
So this patch creates a new directory: /debug/tracing/per_cpu/.
Inside this directory, you will now find one trace_pipe file and
one trace file per cpu.
Which means if you have two cpus, you will have:
trace0
trace1
trace_pipe0
trace_pipe1
And of course, reading these files will have the same effect
as with the usual tracing files, except that you will only see
the traces from the given cpu.
The original all-in-one-cpu trace files are still available in
their original place.
Until now, only one consumer was allowed on trace_pipe, to avoid
racy consumption of the ring-buffer. Now the approach has changed
a bit: you can have only one consumer per cpu.
Which means you are allowed to read trace_pipe0 and trace_pipe1
concurrently, but you can't have two readers on trace_pipe0 or
on trace_pipe1.
Following the same logic, if there is one reader on the common
trace_pipe, you cannot at the same time have another reader on
trace_pipe0 or trace_pipe1, because trace_pipe is already, in
essence, a consumer of all cpu buffers.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-02-25 02:22:28 +00:00
|
|
|
int cpu_file = iter->cpu_file;
|
2008-09-30 03:02:41 +00:00
|
|
|
u64 next_ts = 0, ts;
|
2008-05-12 19:20:42 +00:00
|
|
|
int next_cpu = -1;
|
2012-03-27 14:43:28 +00:00
|
|
|
int next_size = 0;
|
2008-05-12 19:20:42 +00:00
|
|
|
int cpu;
|
|
|
|
|
tracing/core: introduce per cpu tracing files
2009-02-25 02:22:28 +00:00
|
|
|
/*
|
|
|
|
* If we are in a per_cpu trace file, don't bother by iterating over
|
|
|
|
* all cpu and peek directly.
|
|
|
|
*/
|
2013-01-23 20:22:59 +00:00
|
|
|
if (cpu_file > RING_BUFFER_ALL_CPUS) {
|
tracing/core: introduce per cpu tracing files
2009-02-25 02:22:28 +00:00
|
|
|
if (ring_buffer_empty_cpu(buffer, cpu_file))
|
|
|
|
return NULL;
|
2010-03-31 23:49:26 +00:00
|
|
|
ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
|
tracing/core: introduce per cpu tracing files
2009-02-25 02:22:28 +00:00
|
|
|
if (ent_cpu)
|
|
|
|
*ent_cpu = cpu_file;
|
|
|
|
|
|
|
|
return ent;
|
|
|
|
}
|
|
|
|
|
2008-05-12 19:21:00 +00:00
|
|
|
for_each_tracing_cpu(cpu) {
|
2008-08-01 16:26:41 +00:00
|
|
|
|
2008-09-30 03:02:41 +00:00
|
|
|
if (ring_buffer_empty_cpu(buffer, cpu))
|
|
|
|
continue;
|
2008-08-01 16:26:41 +00:00
|
|
|
|
2010-03-31 23:49:26 +00:00
|
|
|
ent = peek_next_entry(iter, cpu, &ts, &lost_events);
|
2008-08-01 16:26:41 +00:00
|
|
|
|
2008-05-12 19:20:46 +00:00
|
|
|
/*
|
|
|
|
* Pick the entry with the smallest timestamp:
|
|
|
|
*/
|
2008-09-30 03:02:41 +00:00
|
|
|
if (ent && (!next || ts < next_ts)) {
|
2008-05-12 19:20:42 +00:00
|
|
|
next = ent;
|
|
|
|
next_cpu = cpu;
|
2008-09-30 03:02:41 +00:00
|
|
|
next_ts = ts;
|
2010-03-31 23:49:26 +00:00
|
|
|
next_lost = lost_events;
|
2012-03-27 14:43:28 +00:00
|
|
|
next_size = iter->ent_size;
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-03-27 14:43:28 +00:00
|
|
|
iter->ent_size = next_size;
|
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
if (ent_cpu)
|
|
|
|
*ent_cpu = next_cpu;
|
|
|
|
|
2008-09-30 03:02:41 +00:00
|
|
|
if (ent_ts)
|
|
|
|
*ent_ts = next_ts;
|
|
|
|
|
2010-03-31 23:49:26 +00:00
|
|
|
if (missing_events)
|
|
|
|
*missing_events = next_lost;
|
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
return next;
|
|
|
|
}
|
|
|
|
|
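The per_cpu tracing files described in the annotation further up expose one consuming pipe per CPU. A minimal user-space reader is sketched below; the path is an assumption (recent kernels expose it as /sys/kernel/tracing/per_cpu/cpu0/trace_pipe, older ones under /sys/kernel/debug/tracing with the file names listed in that message):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe", O_RDONLY);

	if (fd < 0)
		return 1;
	/* read() blocks until cpu0 produces traces, and consumes them. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}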
tracing: Do not allocate buffer in trace_find_next_entry() in atomic
When dumping out the trace data in latency format, a check is made to peek
at the next event to compare its timestamp to the current one, and if the
delta is of a greater size, it will add a marker showing so. But to do this,
it needs to save the current event otherwise peeking at the next event will
remove the current event. To save the event, a temp buffer is used, and if
the event is bigger than the temp buffer, the temp buffer is freed and a
bigger buffer is allocated.
This allocation is a problem when called in atomic context. The only way
this gets called via atomic context is via ftrace_dump(). Thus, use a static
buffer of 128 bytes (which covers most events), and if the event is bigger
than that, simply return NULL. The callers of trace_find_next_entry() need
to handle a NULL case, as that's what would happen if the allocation failed.
Link: https://lore.kernel.org/r/20200326091256.GR11705@shao2-debian
Fixes: ff895103a84ab ("tracing: Save off entry when peeking at next entry")
Reported-by: kernel test robot <rong.a.chen@intel.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
2020-04-02 02:44:46 +00:00
|
|
|
#define STATIC_TEMP_BUF_SIZE 128
|
|
|
|
static char static_temp_buf[STATIC_TEMP_BUF_SIZE];
|
|
|
|
|
2008-08-01 16:26:41 +00:00
|
|
|
/* Find the next real entry, without updating the iterator itself */
|
2009-02-02 22:29:21 +00:00
|
|
|
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
|
|
|
|
int *ent_cpu, u64 *ent_ts)
|
2008-05-12 19:20:42 +00:00
|
|
|
{
|
2020-03-17 21:32:23 +00:00
|
|
|
/* __find_next_entry will reset ent_size */
|
|
|
|
int ent_size = iter->ent_size;
|
|
|
|
struct trace_entry *entry;
|
|
|
|
|
tracing: Do not allocate buffer in trace_find_next_entry() in atomic
2020-04-02 02:44:46 +00:00
|
|
|
/*
|
|
|
|
* If called from ftrace_dump(), then the iter->temp buffer
|
|
|
|
* will be the static_temp_buf and not created from kmalloc.
|
|
|
|
* If the entry size is greater than the buffer, we can
|
|
|
|
* not save it. Just return NULL in that case. This is only
|
|
|
|
* used to add markers when two consecutive events' time
|
|
|
|
* stamps have a large delta. See trace_print_lat_context()
|
|
|
|
*/
|
|
|
|
if (iter->temp == static_temp_buf &&
|
|
|
|
STATIC_TEMP_BUF_SIZE < ent_size)
|
|
|
|
return NULL;
|
|
|
|
|
2020-03-17 21:32:23 +00:00
|
|
|
/*
|
|
|
|
* The __find_next_entry() may call peek_next_entry(), which may
|
|
|
|
* call ring_buffer_peek() that may make the contents of iter->ent
|
|
|
|
* undefined. Need to copy iter->ent now.
|
|
|
|
*/
|
|
|
|
if (iter->ent && iter->ent != iter->temp) {
|
tracing: Do not allocate buffer in trace_find_next_entry() in atomic
2020-04-02 02:44:46 +00:00
|
|
|
if ((!iter->temp || iter->temp_size < iter->ent_size) &&
|
|
|
|
!WARN_ON_ONCE(iter->temp == static_temp_buf)) {
|
2020-03-17 21:32:23 +00:00
|
|
|
kfree(iter->temp);
|
|
|
|
iter->temp = kmalloc(iter->ent_size, GFP_KERNEL);
|
|
|
|
if (!iter->temp)
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
memcpy(iter->temp, iter->ent, iter->ent_size);
|
|
|
|
iter->temp_size = iter->ent_size;
|
|
|
|
iter->ent = iter->temp;
|
|
|
|
}
|
|
|
|
entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
|
|
|
|
/* Put back the original ent_size */
|
|
|
|
iter->ent_size = ent_size;
|
|
|
|
|
|
|
|
return entry;
|
2008-08-01 16:26:41 +00:00
|
|
|
}
|
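As the annotation above notes, callers of trace_find_next_entry() must cope with a NULL return (static temp buffer too small in ftrace_dump(), or allocation failure). A hedged caller-side sketch of that pattern, simplified from how the latency output code uses it; peek_delta() is illustrative only:

static void peek_delta(struct trace_iterator *iter)
{
	struct trace_entry *next;
	u64 next_ts = 0;

	next = trace_find_next_entry(iter, NULL, &next_ts);
	if (!next)
		next_ts = iter->ts;	/* nothing usable to peek at: no delta marker */

	/* ... compare next_ts with iter->ts to decide whether to print a marker ... */
}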
|
|
|
|
|
|
|
/* Find the next real entry, and increment the iterator to the next entry */
|
2010-08-05 14:22:23 +00:00
|
|
|
void *trace_find_next_entry_inc(struct trace_iterator *iter)
|
2008-08-01 16:26:41 +00:00
|
|
|
{
|
2010-03-31 23:49:26 +00:00
|
|
|
iter->ent = __find_next_entry(iter, &iter->cpu,
|
|
|
|
&iter->lost_events, &iter->ts);
|
2008-08-01 16:26:41 +00:00
|
|
|
|
2008-09-30 03:02:41 +00:00
|
|
|
if (iter->ent)
|
2008-11-12 11:59:32 +00:00
|
|
|
trace_iterator_increment(iter);
|
2008-08-01 16:26:41 +00:00
|
|
|
|
2008-09-30 03:02:41 +00:00
|
|
|
return iter->ent ? iter : NULL;
|
2008-05-12 19:20:46 +00:00
|
|
|
}
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2008-05-12 19:20:51 +00:00
|
|
|
static void trace_consume(struct trace_iterator *iter)
|
2008-05-12 19:20:46 +00:00
|
|
|
{
|
2020-01-09 23:53:48 +00:00
|
|
|
ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
|
2010-03-31 23:49:26 +00:00
|
|
|
&iter->lost_events);
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
|
|
|
|
2008-05-12 19:20:51 +00:00
|
|
|
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
|
2008-05-12 19:20:42 +00:00
|
|
|
{
|
|
|
|
struct trace_iterator *iter = m->private;
|
|
|
|
int i = (int)*pos;
|
2008-05-12 19:20:45 +00:00
|
|
|
void *ent;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2009-12-07 14:11:39 +00:00
|
|
|
WARN_ON_ONCE(iter->leftover);
|
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
(*pos)++;
|
|
|
|
|
|
|
|
/* can't go backwards */
|
|
|
|
if (iter->idx > i)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
if (iter->idx < 0)
|
2010-08-05 14:22:23 +00:00
|
|
|
ent = trace_find_next_entry_inc(iter);
|
2008-05-12 19:20:42 +00:00
|
|
|
else
|
|
|
|
ent = iter;
|
|
|
|
|
|
|
|
while (ent && iter->idx < i)
|
2010-08-05 14:22:23 +00:00
|
|
|
ent = trace_find_next_entry_inc(iter);
|
2008-05-12 19:20:42 +00:00
|
|
|
|
|
|
|
iter->pos = *pos;
|
|
|
|
|
|
|
|
return ent;
|
|
|
|
}
|
|
|
|
|
2010-08-05 14:22:23 +00:00
|
|
|
void tracing_iter_reset(struct trace_iterator *iter, int cpu)
|
2009-09-01 15:06:29 +00:00
|
|
|
{
|
|
|
|
struct ring_buffer_iter *buf_iter;
|
|
|
|
unsigned long entries = 0;
|
|
|
|
u64 ts;
|
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
|
2009-09-01 15:06:29 +00:00
|
|
|
|
2012-06-28 00:46:14 +00:00
|
|
|
buf_iter = trace_buffer_iter(iter, cpu);
|
|
|
|
if (!buf_iter)
|
2009-09-01 15:06:29 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
ring_buffer_iter_reset(buf_iter);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We could have the case with the max latency tracers
|
|
|
|
* that a reset never took place on a cpu. This is evident
|
|
|
|
* by the timestamp being before the start of the buffer.
|
|
|
|
*/
|
2020-06-16 03:36:46 +00:00
|
|
|
while (ring_buffer_iter_peek(buf_iter, &ts)) {
|
2020-01-09 23:53:48 +00:00
|
|
|
if (ts >= iter->array_buffer->time_start)
|
2009-09-01 15:06:29 +00:00
|
|
|
break;
|
|
|
|
entries++;
|
2020-03-17 21:32:25 +00:00
|
|
|
ring_buffer_iter_advance(buf_iter);
|
2009-09-01 15:06:29 +00:00
|
|
|
}
|
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
|
2009-09-01 15:06:29 +00:00
|
|
|
}
|
|
|
|
|
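For orientation, s_next() above and s_start() below are the seq_file callbacks for the trace file; they are normally registered through an operations table along the following lines (the s_stop()/s_show() counterparts are not part of this excerpt, and the table name is assumed):

static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,	/* assumed counterpart */
	.show		= s_show,	/* assumed counterpart */
};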
tracing/core: make the read callbacks reentrant
Now that several per-cpu files can be read or spliced at the
same time, we want the read/splice callbacks for tracing files to
be reentrant.
Until now, a single global mutex (trace_types_lock) serialized
the access to tracing_read_pipe(), tracing_splice_read_pipe(),
and the seq helpers.
I.e., it means that if a user tries to read trace_pipe0 and
trace_pipe1 at the same time, access to the function
tracing_read_pipe() is contended and one reader must wait for
the other to finish its read call.
The trace_types_lock mutex is mostly here to serialize access
to the global current tracer (current_trace), which can be
changed concurrently. Although the iter struct keeps a private
pointer to this tracer, its callbacks can be changed by another
function.
The method used here is to no longer keep a private reference to
the tracer inside the iterator, but to make a copy of it inside
the iterator. It then checks on subsequent read calls whether the
tracer has changed. This is not costly because the current
tracer is not expected to change often, so we use a branch
prediction hint for that.
Moreover, we add a private mutex to the iterator (there is one
iterator per file descriptor) to serialize the accesses in case
of multiple consumers per file descriptor (which would be a
silly idea from the user). Note that this is not to protect the
ring buffer, since the ring buffer already serializes the
readers' accesses. This is to prevent trace weirdness in
case of concurrent consumers. But these mutexes can be dropped
anyway; that would not result in any crash. Just tell me what
you think about it.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-02-25 05:13:16 +00:00
|
|
|
/*
|
|
|
|
* The current tracer is copied to avoid global locking
|
|
|
|
* all around.
|
|
|
|
*/
|
2008-05-12 19:20:42 +00:00
|
|
|
static void *s_start(struct seq_file *m, loff_t *pos)
|
|
|
|
{
|
|
|
|
struct trace_iterator *iter = m->private;
|
2012-05-11 17:29:49 +00:00
|
|
|
struct trace_array *tr = iter->tr;
|
tracing/core: introduce per cpu tracing files
2009-02-25 02:22:28 +00:00
|
|
|
int cpu_file = iter->cpu_file;
|
2008-05-12 19:20:42 +00:00
|
|
|
void *p = NULL;
|
|
|
|
loff_t l = 0;
|
2008-09-30 03:02:41 +00:00
|
|
|
int cpu;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2012-12-26 02:52:52 +00:00
|
|
|
/*
|
|
|
|
* copy the tracer to avoid using a global lock all around.
|
|
|
|
* iter->trace is a copy of current_trace, the pointer to the
|
|
|
|
* name may be used instead of a strcmp(), as iter->trace->name
|
|
|
|
* will point to the same string as current_trace->name.
|
|
|
|
*/
|
2008-05-12 19:20:42 +00:00
|
|
|
mutex_lock(&trace_types_lock);
|
2012-05-11 17:29:49 +00:00
|
|
|
if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
|
|
|
|
*iter->trace = *tr->current_trace;
|
tracing/core: make the read callbacks reentrants
Now that several per-cpu files can be read or spliced at the
same time, we want the read/splice callbacks for tracing files
to be reentrant.
Until now, a single global mutex (trace_types_lock) serialized
the access to tracing_read_pipe(), tracing_splice_read_pipe(),
and the seq helpers.
I.e., if a user tries to read trace_pipe0 and
trace_pipe1 at the same time, access to the function
tracing_read_pipe() is contended and one reader must wait for
the other to finish its read call.
The trace_types_lock mutex is mostly here to serialize access
to the global current tracer (current_trace), which can be
changed concurrently. Although the iter struct keeps a private
pointer to this tracer, its callbacks can be changed by another
function.
The method used here is to no longer keep a private reference to
the tracer inside the iterator but to make a copy of it inside
the iterator. Then it checks on subsequent read calls whether the
tracer has changed. This is not costly because the current
tracer is not expected to change often, so we use a branch
prediction hint for that.
Moreover, we add a private mutex to the iterator (there is one
iterator per file descriptor) to serialize the accesses in case
of multiple consumers per file descriptor (which would be a
silly idea from the user). Note that this is not to protect the
ring buffer, since the ring buffer already serializes the
readers' accesses. This is to prevent trace weirdness in
case of concurrent consumers. These mutexes could be dropped
anyway; that would not result in any crash. Just tell me what
you think about it.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-02-25 05:13:16 +00:00
|
|
|
mutex_unlock(&trace_types_lock);
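As a rough standalone illustration of the copy-and-compare trick described above, the snippet below (plain C, with made-up names) shows why a struct copy lets a later pointer comparison on the name stand in for strcmp(): the copy's name field aliases the same string as the live tracer's until the tracer is switched.

/* Illustrative only: the types and names here are hypothetical, not trace.c's. */
#include <assert.h>
#include <stdio.h>

struct demo_tracer {
	const char *name;
	int some_setting;
};

static struct demo_tracer tracer_a = { "nop",      0 };
static struct demo_tracer tracer_b = { "function", 1 };
static struct demo_tracer *current_demo = &tracer_a;

int main(void)
{
	struct demo_tracer iter_copy = *current_demo;	/* like *iter->trace = *tr->current_trace */

	/* Same pointer: the copy's name aliases the current tracer's name. */
	assert(iter_copy.name == current_demo->name);

	current_demo = &tracer_b;			/* tracer switched elsewhere */

	/* Cheap staleness check: pointer compare instead of strcmp(). */
	if (iter_copy.name != current_demo->name) {
		printf("tracer changed, refreshing copy\n");
		iter_copy = *current_demo;
	}
	return 0;
}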
|
2008-05-12 19:20:42 +00:00
|
|
|
|
tracing: Consolidate max_tr into main trace_array structure
Currently, the way the latency tracers and the snapshot feature work
is to have a separate trace_array called "max_tr" that holds the
snapshot buffer. For latency tracers, this snapshot buffer is used
to swap with the running buffer in order to save the current max
latency.
The only items needed for the max_tr are really just a copy of the buffer
itself, the per_cpu data pointers, the time_start timestamp that states
when the max latency was triggered, and the cpu that the max latency
was triggered on. All other fields in trace_array are unused by the
max_tr, making the max_tr mostly bloat.
This change removes the max_tr completely, and adds a new structure
called trace_buffer that holds the buffer pointer, the per_cpu data
pointers, the time_start timestamp, and the cpu where the latency occurred.
The trace_array now has two trace_buffers, one for the normal trace and
one for the max trace or snapshot. By doing this, not only do we remove
the bloat from the max_tr, but trace instances can now use
their own snapshot feature instead of only the top-level global_trace
having the snapshot feature and latency tracers for itself.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
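A simplified, hypothetical sketch of the layout this describes is shown below; the real struct array_buffer and struct trace_array live in kernel/trace/trace.h and carry many more fields, so treat the names and types here as illustrative only.

/* Hypothetical, simplified view of the structures described above. */
struct demo_buffer {
	void               *buffer;      /* ring buffer for this trace        */
	void               *percpu_data; /* per-cpu bookkeeping               */
	unsigned long long  time_start;  /* when the max latency was hit      */
	int                 cpu;         /* cpu that hit the max latency      */
};

struct demo_trace_array {
	struct demo_buffer  normal;      /* the live trace                    */
	struct demo_buffer  max;         /* snapshot / max-latency copy       */
	/* ... current tracer, flags, per-instance options ...                */
};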
2013-03-05 14:24:35 +00:00
|
|
|
#ifdef CONFIG_TRACER_MAX_TRACE
|
2012-12-26 02:53:00 +00:00
|
|
|
if (iter->snapshot && iter->trace->use_max_tr)
|
|
|
|
return ERR_PTR(-EBUSY);
|
2013-03-05 14:24:35 +00:00
|
|
|
#endif
|
2012-12-26 02:53:00 +00:00
|
|
|
|
|
|
|
if (!iter->snapshot)
|
2017-06-27 02:01:55 +00:00
|
|
|
atomic_inc(&trace_record_taskinfo_disabled);
|
2008-05-12 19:20:42 +00:00
|
|
|
|
|
|
|
if (*pos != iter->pos) {
|
|
|
|
iter->ent = NULL;
|
|
|
|
iter->cpu = 0;
|
|
|
|
iter->idx = -1;
|
|
|
|
|
2013-01-23 20:22:59 +00:00
|
|
|
if (cpu_file == RING_BUFFER_ALL_CPUS) {
|
2009-02-25 02:22:28 +00:00
|
|
|
for_each_tracing_cpu(cpu)
|
2009-09-01 15:06:29 +00:00
|
|
|
tracing_iter_reset(iter, cpu);
|
2009-02-25 02:22:28 +00:00
|
|
|
} else
|
2009-09-01 15:06:29 +00:00
|
|
|
tracing_iter_reset(iter, cpu_file);
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2010-03-02 09:54:50 +00:00
|
|
|
iter->leftover = 0;
|
2008-05-12 19:20:42 +00:00
|
|
|
for (p = iter; p && l < *pos; p = s_next(m, p, &l))
|
|
|
|
;
|
|
|
|
|
|
|
|
} else {
|
2009-12-07 14:11:39 +00:00
|
|
|
/*
|
|
|
|
* If we overflowed the seq_file before, then we want
|
|
|
|
* to just reuse the trace_seq buffer again.
|
|
|
|
*/
|
|
|
|
if (iter->leftover)
|
|
|
|
p = iter;
|
|
|
|
else {
|
|
|
|
l = *pos - 1;
|
|
|
|
p = s_next(m, p, &l);
|
|
|
|
}
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
|
|
|
|
2009-05-18 11:35:34 +00:00
|
|
|
trace_event_read_lock();
|
tracing: Consolidate protection of reader access to the ring buffer
At the beginning, access to the ring buffer was fully serialized
by trace_types_lock. Patch d7350c3f4569 gives more freedom to readers,
and patch b04cc6b1f6 adds code to protect trace_pipe and cpu#/trace_pipe.
But actually it is not enough: ring buffer readers are not always
read-only; they may consume data.
This patch makes accesses to trace, trace_pipe, trace_pipe_raw,
cpu#/trace, cpu#/trace_pipe and cpu#/trace_pipe_raw serialized.
And it removes tracing_reader_cpumask, which was used to protect trace_pipe.
Details:
The ring buffer serializes readers, but that is low-level protection.
The validity of the events (returned by ring_buffer_peek(), etc.)
is not protected by the ring buffer.
The content of events may become garbage if we allow another process to
consume these events concurrently:
A) the page of the consumed events may become a normal page
(not a reader page) in the ring buffer, and this page will be rewritten
by the events producer.
B) The page of the consumed events may become a page for splice_read,
and this page will be returned to the system.
This patch adds trace_access_lock() and trace_access_unlock() primitives.
These primitives allow multi-process access to different cpu ring buffers
concurrently.
These primitives don't distinguish between read-only and read-consume access.
Multiple read-only accesses are also serialized.
And we don't use these primitives when we open files;
we only use them when we read files.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
LKML-Reference: <4B447D52.1050602@cn.fujitsu.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
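The scheme this describes is essentially a reader/writer pattern: a whole-buffer reader must exclude every per-cpu reader, while readers of different cpus may proceed in parallel and only one consumer may touch a given cpu buffer at a time. Below is a hedged userspace analogy built on pthreads; it mirrors the idea only and is not the kernel's implementation.

/* Userspace analogy of the trace_access_lock()/unlock() idea: per-cpu
 * readers take a shared rwlock plus their own cpu mutex; a reader of the
 * whole buffer takes the rwlock exclusively.  Sketch of the concept only. */
#include <pthread.h>

#define DEMO_NR_CPUS    4
#define DEMO_ALL_CPUS (-1)

static pthread_rwlock_t all_cpu_access = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t  cpu_access[DEMO_NR_CPUS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static void demo_access_lock(int cpu)
{
	if (cpu == DEMO_ALL_CPUS) {
		/* Whole-buffer reader: exclude every per-cpu reader. */
		pthread_rwlock_wrlock(&all_cpu_access);
	} else {
		/* Per-cpu readers run concurrently with each other... */
		pthread_rwlock_rdlock(&all_cpu_access);
		/* ...but only one consumer per cpu buffer at a time. */
		pthread_mutex_lock(&cpu_access[cpu]);
	}
}

static void demo_access_unlock(int cpu)
{
	if (cpu == DEMO_ALL_CPUS) {
		pthread_rwlock_unlock(&all_cpu_access);
	} else {
		pthread_mutex_unlock(&cpu_access[cpu]);
		pthread_rwlock_unlock(&all_cpu_access);
	}
}

int main(void)
{
	demo_access_lock(2);			/* e.g. a cpu2 trace_pipe reader */
	demo_access_unlock(2);
	demo_access_lock(DEMO_ALL_CPUS);	/* e.g. the global trace_pipe */
	demo_access_unlock(DEMO_ALL_CPUS);
	return 0;
}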
2010-01-06 12:08:50 +00:00
|
|
|
trace_access_lock(cpu_file);
|
2008-05-12 19:20:42 +00:00
|
|
|
return p;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void s_stop(struct seq_file *m, void *p)
|
|
|
|
{
|
2010-01-06 12:08:50 +00:00
|
|
|
struct trace_iterator *iter = m->private;
|
|
|
|
|
2013-03-05 14:24:35 +00:00
|
|
|
#ifdef CONFIG_TRACER_MAX_TRACE
|
2012-12-26 02:53:00 +00:00
|
|
|
if (iter->snapshot && iter->trace->use_max_tr)
|
|
|
|
return;
|
2013-03-05 14:24:35 +00:00
|
|
|
#endif
|
2012-12-26 02:53:00 +00:00
|
|
|
|
|
|
|
if (!iter->snapshot)
|
2017-06-27 02:01:55 +00:00
|
|
|
atomic_dec(&trace_record_taskinfo_disabled);
|
2013-03-05 14:24:35 +00:00
|
|
|
|
2010-01-06 12:08:50 +00:00
|
|
|
trace_access_unlock(iter->cpu_file);
|
2009-05-18 11:35:34 +00:00
|
|
|
trace_event_read_unlock();
|
2008-05-12 19:20:42 +00:00
|
|
|
}
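Here s_start() and s_stop() implement the kernel's seq_file iterator contract: ->start() positions the iterator at *pos and takes any needed locks, ->next() advances it, and ->stop() releases whatever ->start() took. As orientation, a minimal, hypothetical module using the same contract on a trivial array might look like the sketch below (assuming a kernel recent enough to have struct proc_ops); it is illustrative only, not ftrace code.

/* Minimal, hypothetical seq_file example exposing an array under /proc. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_vals[] = { 10, 20, 30, 40 };

static void *demo_start(struct seq_file *m, loff_t *pos)
{
	/* Take locks here if the data can change; return the *pos element. */
	if (*pos >= (loff_t)ARRAY_SIZE(demo_vals))
		return NULL;
	return &demo_vals[*pos];
}

static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return demo_start(m, pos);
}

static void demo_stop(struct seq_file *m, void *v)
{
	/* Undo whatever demo_start() took (locks, references, ...). */
}

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", *(int *)v);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start = demo_start,
	.next  = demo_next,
	.stop  = demo_stop,
	.show  = demo_show,
};

static int demo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &demo_seq_ops);
}

static const struct proc_ops demo_proc_ops = {
	.proc_open    = demo_open,
	.proc_read    = seq_read,
	.proc_lseek   = seq_lseek,
	.proc_release = seq_release,
};

static int __init demo_init(void)
{
	return proc_create("seq_demo", 0444, NULL, &demo_proc_ops) ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("seq_demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");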
|
|
|
|
|
2019-03-19 17:12:05 +00:00
|
|
|
static void
|
2020-01-09 23:53:48 +00:00
|
|
|
get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
|
2019-03-19 17:12:05 +00:00
|
|
|
unsigned long *entries, int cpu)
|
|
|
|
{
|
|
|
|
unsigned long count;
|
|
|
|
|
|
|
|
count = ring_buffer_entries_cpu(buf->buffer, cpu);
|
|
|
|
/*
|
|
|
|
* If this buffer has skipped entries, then we hold all
|
|
|
|
* entries for the trace and we need to ignore the
|
|
|
|
* ones before the time stamp.
|
|
|
|
*/
|
|
|
|
if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
|
|
|
|
count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
|
|
|
|
/* total is the same as the entries */
|
|
|
|
*total = count;
|
|
|
|
} else
|
|
|
|
*total = count +
|
|
|
|
ring_buffer_overrun_cpu(buf->buffer, cpu);
|
|
|
|
*entries = count;
|
|
|
|
}
|
|
|
|
|
2011-11-17 15:35:16 +00:00
|
|
|
static void
|
2020-01-09 23:53:48 +00:00
|
|
|
get_total_entries(struct array_buffer *buf,
|
2013-03-05 14:24:35 +00:00
|
|
|
unsigned long *total, unsigned long *entries)
|
2011-11-17 15:35:16 +00:00
|
|
|
{
|
2019-03-19 17:12:05 +00:00
|
|
|
unsigned long t, e;
|
2011-11-17 15:35:16 +00:00
|
|
|
int cpu;
|
|
|
|
|
|
|
|
*total = 0;
|
|
|
|
*entries = 0;
|
|
|
|
|
|
|
|
for_each_tracing_cpu(cpu) {
|
2019-03-19 17:12:05 +00:00
|
|
|
get_total_entries_cpu(buf, &t, &e, cpu);
|
|
|
|
*total += t;
|
|
|
|
*entries += e;
|
2011-11-17 15:35:16 +00:00
|
|
|
}
|
|
|
|
}
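Put plainly, *entries is what is still present in the buffer (minus anything recorded before the window of interest), while *total additionally counts events the writer overwrote. A tiny standalone sketch of the same arithmetic, with invented numbers:

/* Illustrative arithmetic only; the numbers are invented. */
#include <stdio.h>

int main(void)
{
	unsigned long in_buffer = 9500;   /* as ring_buffer_entries_cpu() */
	unsigned long overrun   = 1200;   /* as ring_buffer_overrun_cpu() */
	unsigned long skipped   = 0;      /* ->skipped_entries            */
	unsigned long entries, total;

	if (skipped) {
		entries = in_buffer - skipped;	/* drop pre-timestamp events */
		total   = entries;		/* overruns are ignored here */
	} else {
		entries = in_buffer;
		total   = in_buffer + overrun;	/* 9500 + 1200 = 10700 */
	}

	printf("entries-in-buffer/entries-written: %lu/%lu\n", entries, total);
	return 0;
}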
|
|
|
|
|
2019-03-19 17:12:05 +00:00
|
|
|
unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
|
|
|
|
{
|
|
|
|
unsigned long total, entries;
|
|
|
|
|
|
|
|
if (!tr)
|
|
|
|
tr = &global_trace;
|
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
|
2019-03-19 17:12:05 +00:00
|
|
|
|
|
|
|
return entries;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned long trace_total_entries(struct trace_array *tr)
|
|
|
|
{
|
|
|
|
unsigned long total, entries;
|
|
|
|
|
|
|
|
if (!tr)
|
|
|
|
tr = &global_trace;
|
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
get_total_entries(&tr->array_buffer, &total, &entries);
|
2019-03-19 17:12:05 +00:00
|
|
|
|
|
|
|
return entries;
|
|
|
|
}
|
|
|
|
|
2008-05-12 19:20:51 +00:00
|
|
|
static void print_lat_help_header(struct seq_file *m)
|
2008-05-12 19:20:42 +00:00
|
|
|
{
|
2020-09-04 08:23:31 +00:00
|
|
|
seq_puts(m, "# _------=> CPU# \n"
|
|
|
|
"# / _-----=> irqs-off \n"
|
|
|
|
"# | / _----=> need-resched \n"
|
|
|
|
"# || / _---=> hardirq/softirq \n"
|
|
|
|
"# ||| / _--=> preempt-depth \n"
|
|
|
|
"# |||| / delay \n"
|
|
|
|
"# cmd pid ||||| time | caller \n"
|
|
|
|
"# \\ / ||||| \\ | / \n");
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
static void print_event_info(struct array_buffer *buf, struct seq_file *m)
|
2008-05-12 19:20:42 +00:00
|
|
|
{
|
2011-11-17 15:35:16 +00:00
|
|
|
unsigned long total;
|
|
|
|
unsigned long entries;
|
|
|
|
|
2013-03-05 14:24:35 +00:00
|
|
|
get_total_entries(buf, &total, &entries);
|
2011-11-17 15:35:16 +00:00
|
|
|
seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
|
|
|
|
entries, total, num_online_cpus());
|
|
|
|
seq_puts(m, "#\n");
|
|
|
|
}
|
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
|
2017-06-26 05:38:43 +00:00
|
|
|
unsigned int flags)
|
2011-11-17 15:35:16 +00:00
|
|
|
{
|
2017-06-26 05:38:43 +00:00
|
|
|
bool tgid = flags & TRACE_ITER_RECORD_TGID;
|
|
|
|
|
2013-03-05 14:24:35 +00:00
|
|
|
print_event_info(buf, m);
|
2017-06-26 05:38:43 +00:00
|
|
|
|
2020-09-04 08:23:31 +00:00
|
|
|
seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
|
|
|
|
seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
|
2017-06-26 05:38:43 +00:00
|
|
|
unsigned int flags)
|
tracing: Add irq, preempt-count and need resched info to default trace output
People keep asking how to get the preempt count, irq, and need-resched info,
and we keep telling them to enable the latency format. Some developers think
that traces without this info are completely useless, and for a lot of tasks
they are.
The first option was to enable the latency trace as the default format, but
the header for the latency format is pretty useless for most tracers and
it also reports the timestamp in straight microseconds from the time the trace
started. This is sometimes more difficult to read, as the default trace is in
seconds from the start of boot-up.
Latency format:
# tracer: nop
#
# nop latency trace v1.1.5 on 3.2.0-rc1-test+
# --------------------------------------------------------------------
# latency: 0 us, #159771/64234230, CPU#1 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
# -----------------
# | task: -0 (uid:0 nice:0 policy:0 rt_prio:0)
# -----------------
#
# _------=> CPU#
# / _-----=> irqs-off
# | / _----=> need-resched
# || / _---=> hardirq/softirq
# ||| / _--=> preempt-depth
# |||| / delay
# cmd pid ||||| time | caller
# \ / ||||| \ | /
migratio-6 0...2 41778231us+: rcu_note_context_switch <-__schedule
migratio-6 0...2 41778233us : trace_rcu_utilization <-rcu_note_context_switch
migratio-6 0...2 41778235us+: rcu_sched_qs <-rcu_note_context_switch
migratio-6 0d..2 41778236us+: rcu_preempt_qs <-rcu_note_context_switch
migratio-6 0...2 41778238us : trace_rcu_utilization <-rcu_note_context_switch
migratio-6 0...2 41778239us+: debug_lockdep_rcu_enabled <-__schedule
default format:
# tracer: nop
#
# TASK-PID CPU# TIMESTAMP FUNCTION
# | | | | |
migration/0-6 [000] 50.025810: rcu_note_context_switch <-__schedule
migration/0-6 [000] 50.025812: trace_rcu_utilization <-rcu_note_context_switch
migration/0-6 [000] 50.025813: rcu_sched_qs <-rcu_note_context_switch
migration/0-6 [000] 50.025815: rcu_preempt_qs <-rcu_note_context_switch
migration/0-6 [000] 50.025817: trace_rcu_utilization <-rcu_note_context_switch
migration/0-6 [000] 50.025818: debug_lockdep_rcu_enabled <-__schedule
migration/0-6 [000] 50.025820: debug_lockdep_rcu_enabled <-__schedule
The latency format header has latency information that is pretty meaningless
for most tracers. Some of the header is useful, though, and we can add that
to the default format later as well.
What is really useful with the latency format is the irqs-off, need-resched,
hard/softirq context and the preempt count.
This commit adds the irq-info option, which is on by default and adds this
information:
# tracer: nop
#
# _-----=> irqs-off
# / _----=> need-resched
# | / _---=> hardirq/softirq
# || / _--=> preempt-depth
# ||| / delay
# TASK-PID CPU# |||| TIMESTAMP FUNCTION
# | | | |||| | |
<idle>-0 [000] d..2 49.309305: cpuidle_get_driver <-cpuidle_idle_call
<idle>-0 [000] d..2 49.309307: mwait_idle <-cpu_idle
<idle>-0 [000] d..2 49.309309: need_resched <-mwait_idle
<idle>-0 [000] d..2 49.309310: test_ti_thread_flag <-need_resched
<idle>-0 [000] d..2 49.309312: trace_power_start.constprop.13 <-mwait_idle
<idle>-0 [000] d..2 49.309313: trace_cpu_idle <-mwait_idle
<idle>-0 [000] d..2 49.309315: need_resched <-mwait_idle
If a user wants the old format, they can disable the 'irq-info' option:
# tracer: nop
#
# TASK-PID CPU# TIMESTAMP FUNCTION
# | | | | |
<idle>-0 [000] 49.309305: cpuidle_get_driver <-cpuidle_idle_call
<idle>-0 [000] 49.309307: mwait_idle <-cpu_idle
<idle>-0 [000] 49.309309: need_resched <-mwait_idle
<idle>-0 [000] 49.309310: test_ti_thread_flag <-need_resched
<idle>-0 [000] 49.309312: trace_power_start.constprop.13 <-mwait_idle
<idle>-0 [000] 49.309313: trace_cpu_idle <-mwait_idle
<idle>-0 [000] 49.309315: need_resched <-mwait_idle
Requested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
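The option introduced here can also be flipped from userspace at runtime by writing to its tracefs option file. A minimal hedged example follows; the mount point is an assumption (older kernels expose tracefs under /sys/kernel/debug/tracing, newer ones under /sys/kernel/tracing).

/* Turn the irq-info columns off and back on from userspace.  The tracefs
 * mount point below is an assumption; adjust it for your system. */
#include <stdio.h>

#define IRQ_INFO_OPT "/sys/kernel/debug/tracing/options/irq-info"

static int write_opt(const char *val)
{
	FILE *f = fopen(IRQ_INFO_OPT, "w");

	if (!f) {
		perror(IRQ_INFO_OPT);
		return -1;
	}
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	write_opt("0");   /* old-style header, no irq/preempt columns */
	write_opt("1");   /* default: show the extra columns again    */
	return 0;
}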
2011-11-17 14:34:33 +00:00
|
|
|
{
|
2017-06-26 05:38:43 +00:00
|
|
|
bool tgid = flags & TRACE_ITER_RECORD_TGID;
|
2020-09-04 08:23:31 +00:00
|
|
|
const char *space = " ";
|
|
|
|
int prec = tgid ? 12 : 2;
|
2017-07-11 19:43:24 +00:00
|
|
|
|
2019-02-14 15:29:50 +00:00
|
|
|
print_event_info(buf, m);
|
|
|
|
|
2020-09-04 08:23:31 +00:00
|
|
|
seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
|
|
|
|
seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
|
|
|
|
seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
|
|
|
|
seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
|
|
|
|
seq_printf(m, "# %.*s||| / delay\n", prec, space);
|
|
|
|
seq_printf(m, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
|
|
|
|
seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
|
2011-11-17 14:34:33 +00:00
|
|
|
}
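The two helpers above lean on a standard printf idiom: "%.*s" prints at most prec characters of its string argument, so passing a run of spaces (or the " TGID " label) with prec set to 12 or 2 widens or narrows the header columns without needing separate format strings. A tiny standalone illustration:

/* Standalone demo of the "%.*s" width trick used by the headers above. */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	const char *space = "            ";	/* 12 spaces of padding */
	bool tgid = true;
	int prec = tgid ? 12 : 2;

	printf("# %.*s _-----=> irqs-off\n", prec, space);
	printf("# %.*s/ _----=> need-resched\n", prec, space);
	printf("# TASK-PID %.*s CPU#  ...\n", prec, "   TGID   ");
	return 0;
}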
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2010-04-02 17:01:22 +00:00
|
|
|
void
|
2008-05-12 19:20:42 +00:00
|
|
|
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
|
|
|
|
{
|
2015-09-30 13:42:05 +00:00
|
|
|
unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
|
2020-01-09 23:53:48 +00:00
|
|
|
struct array_buffer *buf = iter->array_buffer;
|
2013-03-05 14:24:35 +00:00
|
|
|
struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
|
2012-05-11 17:29:49 +00:00
|
|
|
struct tracer *type = iter->trace;
|
2011-11-17 15:35:16 +00:00
|
|
|
unsigned long entries;
|
|
|
|
unsigned long total;
|
2008-05-12 19:20:42 +00:00
|
|
|
const char *name = "preemption";
|
|
|
|
|
2013-02-01 23:38:47 +00:00
|
|
|
name = type->name;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2013-03-05 14:24:35 +00:00
|
|
|
get_total_entries(buf, &total, &entries);
|
2008-05-12 19:20:42 +00:00
|
|
|
|
ftrace: tracing header should put '#' at the beginning of a line
In a recent discussion, Andrew Morton pointed out that the tracing header
should put '#' at the beginning of a line.
Then we can easily filter out the header with the following grep usage:
cat trace | grep -v '^#'
Wakeup trace also has the same header problem.
Comparison of headers displayed:
before this patch:
# tracer: wakeup
#
wakeup latency trace v1.1.5 on 2.6.29-rc7-tip-tip
--------------------------------------------------------------------
latency: 19059 us, #21277/21277, CPU#1 | (M:desktop VP:0, KP:0, SP:0 HP:0 #P:4)
-----------------
| task: kondemand/1-1644 (uid:0 nice:-5 policy:0 rt_prio:0)
-----------------
# _------=> CPU#
# / _-----=> irqs-off
# | / _----=> need-resched
# || / _---=> hardirq/softirq
# ||| / _--=> preempt-depth
# |||| /
# ||||| delay
# cmd pid ||||| time | caller
# \ / ||||| \ | /
irqbalan-1887 1d.s. 0us : 1887:120:R + [001] 1644:115:S kondemand/1
irqbalan-1887 1d.s. 1us : default_wake_function <-autoremove_wake_function
irqbalan-1887 1d.s. 2us : check_preempt_wakeup <-try_to_wake_up
after this patch:
# tracer: wakeup
#
# wakeup latency trace v1.1.5 on 2.6.29-rc7-tip-tip
# --------------------------------------------------------------------
# latency: 529 us, #530/530, CPU#0 | (M:desktop VP:0, KP:0, SP:0 HP:0 #P:4)
# -----------------
# | task: kondemand/0-1641 (uid:0 nice:-5 policy:0 rt_prio:0)
# -----------------
#
# _------=> CPU#
# / _-----=> irqs-off
# | / _----=> need-resched
# || / _---=> hardirq/softirq
# ||| / _--=> preempt-depth
# |||| /
# ||||| delay
# cmd pid ||||| time | caller
# \ / ||||| \ | /
sshd-2496 0d.s. 0us : 2496:120:R + [000] 1641:115:S kondemand/0
sshd-2496 0d.s. 1us : default_wake_function <-autoremove_wake_function
sshd-2496 0d.s. 1us : check_preempt_wakeup <-try_to_wake_up
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
LKML-Reference: <20090308124421.23C3.A69D9226@jp.fujitsu.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-03-08 04:12:43 +00:00
|
|
|
seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
|
2008-05-12 19:20:42 +00:00
|
|
|
name, UTS_RELEASE);
|
2009-03-08 04:12:43 +00:00
|
|
|
seq_puts(m, "# -----------------------------------"
|
2008-05-12 19:20:42 +00:00
|
|
|
"---------------------------------\n");
|
2009-03-08 04:12:43 +00:00
|
|
|
seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
|
2008-05-12 19:20:42 +00:00
|
|
|
" (M:%s VP:%d, KP:%d, SP:%d HP:%d",
|
2008-05-12 19:20:44 +00:00
|
|
|
nsecs_to_usecs(data->saved_latency),
|
2008-05-12 19:20:42 +00:00
|
|
|
entries,
|
2008-05-12 19:20:43 +00:00
|
|
|
total,
|
2013-03-05 14:24:35 +00:00
|
|
|
buf->cpu,
|
2008-05-12 19:20:42 +00:00
|
|
|
#if defined(CONFIG_PREEMPT_NONE)
|
|
|
|
"server",
|
|
|
|
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
|
|
|
|
"desktop",
|
2008-07-11 00:58:12 +00:00
|
|
|
#elif defined(CONFIG_PREEMPT)
|
2008-05-12 19:20:42 +00:00
|
|
|
"preempt",
|
2019-10-15 19:18:20 +00:00
|
|
|
#elif defined(CONFIG_PREEMPT_RT)
|
|
|
|
"preempt_rt",
|
2008-05-12 19:20:42 +00:00
|
|
|
#else
|
|
|
|
"unknown",
|
|
|
|
#endif
|
|
|
|
/* These are reserved for later use */
|
|
|
|
0, 0, 0, 0);
|
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
seq_printf(m, " #P:%d)\n", num_online_cpus());
|
|
|
|
#else
|
|
|
|
seq_puts(m, ")\n");
|
|
|
|
#endif
|
2009-03-08 04:12:43 +00:00
|
|
|
seq_puts(m, "# -----------------\n");
|
|
|
|
seq_printf(m, "# | task: %.16s-%d "
|
2008-05-12 19:20:42 +00:00
|
|
|
"(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
|
2012-03-13 23:02:19 +00:00
|
|
|
data->comm, data->pid,
|
|
|
|
from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
|
2008-05-12 19:20:42 +00:00
|
|
|
data->policy, data->rt_priority);
|
2009-03-08 04:12:43 +00:00
|
|
|
seq_puts(m, "# -----------------\n");
|
2008-05-12 19:20:42 +00:00
|
|
|
|
|
|
|
if (data->critical_start) {
|
2009-03-08 04:12:43 +00:00
|
|
|
seq_puts(m, "# => started at: ");
|
2008-05-12 19:20:46 +00:00
|
|
|
seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
|
|
|
|
trace_print_seq(m, &iter->seq);
|
2009-03-08 04:12:43 +00:00
|
|
|
seq_puts(m, "\n# => ended at: ");
|
2008-05-12 19:20:46 +00:00
|
|
|
seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
|
|
|
|
trace_print_seq(m, &iter->seq);
|
2009-09-02 16:27:41 +00:00
|
|
|
seq_puts(m, "\n#\n");
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
|
|
|
|
2009-03-08 04:12:43 +00:00
|
|
|
seq_puts(m, "#\n");
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
|
|
|
|
2008-11-08 03:36:02 +00:00
|
|
|
static void test_cpu_buff_start(struct trace_iterator *iter)
|
|
|
|
{
|
|
|
|
struct trace_seq *s = &iter->seq;
|
2015-09-30 13:42:05 +00:00
|
|
|
struct trace_array *tr = iter->tr;
|
2008-11-08 03:36:02 +00:00
|
|
|
|
2015-09-30 13:42:05 +00:00
|
|
|
if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
|
2008-11-12 22:52:38 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
|
|
|
|
return;
|
|
|
|
|
2017-04-21 23:41:10 +00:00
|
|
|
if (cpumask_available(iter->started) &&
|
|
|
|
cpumask_test_cpu(iter->cpu, iter->started))
|
2008-11-08 03:36:02 +00:00
|
|
|
return;
|
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
|
2009-09-01 15:06:29 +00:00
|
|
|
return;
|
|
|
|
|
2017-04-21 23:41:10 +00:00
|
|
|
if (cpumask_available(iter->started))
|
2015-09-04 16:45:56 +00:00
|
|
|
cpumask_set_cpu(iter->cpu, iter->started);
|
2009-04-01 20:53:08 +00:00
|
|
|
|
|
|
|
/* Don't print started cpu buffer for the first entry of the trace */
|
|
|
|
if (iter->idx > 1)
|
|
|
|
trace_seq_printf(s, "##### CPU %u buffer started ####\n",
|
|
|
|
iter->cpu);
|
2008-11-08 03:36:02 +00:00
|
|
|
}
|
|
|
|
|
2008-09-29 18:18:34 +00:00
|
|
|
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
|
2008-05-12 19:20:42 +00:00
|
|
|
{
|
2015-09-30 13:42:05 +00:00
|
|
|
struct trace_array *tr = iter->tr;
|
2008-05-12 19:20:46 +00:00
|
|
|
struct trace_seq *s = &iter->seq;
|
2015-09-30 13:42:05 +00:00
|
|
|
unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
|
2008-05-12 19:20:45 +00:00
|
|
|
struct trace_entry *entry;
|
2008-12-24 04:24:13 +00:00
|
|
|
struct trace_event *event;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2008-05-12 19:20:45 +00:00
|
|
|
entry = iter->ent;
|
2008-08-01 16:26:41 +00:00
|
|
|
|
2008-11-08 03:36:02 +00:00
|
|
|
test_cpu_buff_start(iter);
|
|
|
|
|
2009-02-02 22:29:21 +00:00
|
|
|
event = ftrace_find_event(entry->type);
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2015-09-30 13:42:05 +00:00
|
|
|
if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
|
2014-11-12 15:29:54 +00:00
|
|
|
if (iter->iter_flags & TRACE_FILE_LAT_FMT)
|
|
|
|
trace_print_lat_context(iter);
|
|
|
|
else
|
|
|
|
trace_print_context(iter);
|
2009-02-02 22:29:21 +00:00
|
|
|
}
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2014-11-12 15:29:54 +00:00
|
|
|
if (trace_seq_has_overflowed(s))
|
|
|
|
return TRACE_TYPE_PARTIAL_LINE;
|
|
|
|
|
2009-02-04 22:16:39 +00:00
|
|
|
if (event)
|
2010-04-22 22:46:14 +00:00
|
|
|
return event->funcs->trace(iter, sym_flags, event);
|
2009-02-03 22:20:41 +00:00
|
|
|
|
2014-11-12 15:29:54 +00:00
|
|
|
trace_seq_printf(s, "Unknown type %d\n", entry->type);
|
2008-11-22 11:28:47 +00:00
|
|
|
|
2014-11-12 15:29:54 +00:00
|
|
|
return trace_handle_return(s);
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
|
|
|
|
2008-09-29 18:18:34 +00:00
|
|
|
static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
|
2008-05-12 19:20:47 +00:00
|
|
|
{
|
2015-09-30 13:42:05 +00:00
|
|
|
struct trace_array *tr = iter->tr;
|
2008-05-12 19:20:47 +00:00
|
|
|
struct trace_seq *s = &iter->seq;
|
|
|
|
struct trace_entry *entry;
|
2008-12-24 04:24:13 +00:00
|
|
|
struct trace_event *event;
|
2008-05-12 19:20:47 +00:00
|
|
|
|
|
|
|
entry = iter->ent;
|
2008-08-01 16:26:41 +00:00
|
|
|
|
2015-09-30 13:42:05 +00:00
|
|
|
if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
|
2014-11-12 15:29:54 +00:00
|
|
|
trace_seq_printf(s, "%d %d %llu ",
|
|
|
|
entry->pid, iter->cpu, iter->ts);
|
|
|
|
|
|
|
|
if (trace_seq_has_overflowed(s))
|
|
|
|
return TRACE_TYPE_PARTIAL_LINE;
|
2008-05-12 19:20:47 +00:00
|
|
|
|
2008-12-24 04:24:13 +00:00
|
|
|
event = ftrace_find_event(entry->type);
|
2009-02-04 22:16:39 +00:00
|
|
|
if (event)
|
2010-04-22 22:46:14 +00:00
|
|
|
return event->funcs->raw(iter, 0, event);
|
2009-02-03 22:20:41 +00:00
|
|
|
|
2014-11-12 15:29:54 +00:00
|
|
|
trace_seq_printf(s, "%d ?\n", entry->type);
|
2008-09-30 03:02:42 +00:00
|
|
|
|
2014-11-12 15:29:54 +00:00
|
|
|
return trace_handle_return(s);
|
2008-05-12 19:20:47 +00:00
|
|
|
}
|
|
|
|
|
2008-09-29 18:18:34 +00:00
|
|
|
static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
|
2008-05-12 19:20:49 +00:00
|
|
|
{
|
2015-09-30 13:42:05 +00:00
|
|
|
struct trace_array *tr = iter->tr;
|
2008-05-12 19:20:49 +00:00
|
|
|
struct trace_seq *s = &iter->seq;
|
|
|
|
unsigned char newline = '\n';
|
|
|
|
struct trace_entry *entry;
|
2008-12-24 04:24:13 +00:00
|
|
|
struct trace_event *event;
|
2008-05-12 19:20:49 +00:00
|
|
|
|
|
|
|
entry = iter->ent;
|
2008-08-01 16:26:41 +00:00
|
|
|
|
2015-09-30 13:42:05 +00:00
|
|
|
if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
|
2014-11-12 15:29:54 +00:00
|
|
|
SEQ_PUT_HEX_FIELD(s, entry->pid);
|
|
|
|
SEQ_PUT_HEX_FIELD(s, iter->cpu);
|
|
|
|
SEQ_PUT_HEX_FIELD(s, iter->ts);
|
|
|
|
if (trace_seq_has_overflowed(s))
|
|
|
|
return TRACE_TYPE_PARTIAL_LINE;
|
2009-02-02 22:29:21 +00:00
|
|
|
}
|
2008-05-12 19:20:49 +00:00
|
|
|
|
2008-12-24 04:24:13 +00:00
|
|
|
event = ftrace_find_event(entry->type);
|
2009-02-04 22:16:39 +00:00
|
|
|
if (event) {
|
2010-04-22 22:46:14 +00:00
|
|
|
enum print_line_t ret = event->funcs->hex(iter, 0, event);
|
2009-02-03 22:20:41 +00:00
|
|
|
if (ret != TRACE_TYPE_HANDLED)
|
|
|
|
return ret;
|
|
|
|
}
|
2008-10-01 14:52:51 +00:00
|
|
|
|
2014-11-12 15:29:54 +00:00
|
|
|
SEQ_PUT_FIELD(s, newline);
|
2008-05-12 19:20:49 +00:00
|
|
|
|
2014-11-12 15:29:54 +00:00
|
|
|
return trace_handle_return(s);
|
2008-05-12 19:20:49 +00:00
|
|
|
}
|
|
|
|
|
2008-09-29 18:18:34 +00:00
|
|
|
static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
|
2008-05-12 19:20:47 +00:00
|
|
|
{
|
2015-09-30 13:42:05 +00:00
|
|
|
struct trace_array *tr = iter->tr;
|
2008-05-12 19:20:47 +00:00
|
|
|
struct trace_seq *s = &iter->seq;
|
|
|
|
struct trace_entry *entry;
|
2008-12-24 04:24:13 +00:00
|
|
|
struct trace_event *event;
|
2008-05-12 19:20:47 +00:00
|
|
|
|
|
|
|
entry = iter->ent;
|
2008-08-01 16:26:41 +00:00
|
|
|
|
2015-09-30 13:42:05 +00:00
|
|
|
if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
|
2014-11-12 15:29:54 +00:00
|
|
|
SEQ_PUT_FIELD(s, entry->pid);
|
|
|
|
SEQ_PUT_FIELD(s, iter->cpu);
|
|
|
|
SEQ_PUT_FIELD(s, iter->ts);
|
|
|
|
if (trace_seq_has_overflowed(s))
|
|
|
|
return TRACE_TYPE_PARTIAL_LINE;
|
2009-02-02 22:29:21 +00:00
|
|
|
}
|
2008-05-12 19:20:47 +00:00
|
|
|
|
2008-12-24 04:24:13 +00:00
|
|
|
event = ftrace_find_event(entry->type);
|
2010-04-22 22:46:14 +00:00
|
|
|
return event ? event->funcs->binary(iter, 0, event) :
|
|
|
|
TRACE_TYPE_HANDLED;
|
2008-05-12 19:20:47 +00:00
|
|
|
}
|
|
|
|
|
2010-04-02 17:01:22 +00:00
|
|
|
int trace_empty(struct trace_iterator *iter)
|
2008-05-12 19:20:42 +00:00
|
|
|
{
|
2012-06-28 00:46:14 +00:00
|
|
|
struct ring_buffer_iter *buf_iter;
|
2008-05-12 19:20:42 +00:00
|
|
|
int cpu;
|
|
|
|
|
2009-03-11 23:52:30 +00:00
|
|
|
/* If we are looking at one CPU buffer, only check that one */
|
2013-01-23 20:22:59 +00:00
|
|
|
if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
|
2009-03-11 23:52:30 +00:00
|
|
|
cpu = iter->cpu_file;
|
2012-06-28 00:46:14 +00:00
|
|
|
buf_iter = trace_buffer_iter(iter, cpu);
|
|
|
|
if (buf_iter) {
|
|
|
|
if (!ring_buffer_iter_empty(buf_iter))
|
2009-03-11 23:52:30 +00:00
|
|
|
return 0;
|
|
|
|
} else {
|
2020-01-09 23:53:48 +00:00
|
|
|
if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
|
2009-03-11 23:52:30 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2008-05-12 19:21:00 +00:00
|
|
|
for_each_tracing_cpu(cpu) {
|
2012-06-28 00:46:14 +00:00
|
|
|
buf_iter = trace_buffer_iter(iter, cpu);
|
|
|
|
if (buf_iter) {
|
|
|
|
if (!ring_buffer_iter_empty(buf_iter))
|
2008-10-01 04:29:53 +00:00
|
|
|
return 0;
|
|
|
|
} else {
|
2020-01-09 23:53:48 +00:00
|
|
|
if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
|
2008-10-01 04:29:53 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
2008-10-01 04:29:53 +00:00
|
|
|
|
2008-09-30 16:13:45 +00:00
|
|
|
return 1;
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
|
|
|
|
2009-05-18 11:35:34 +00:00
|
|
|
/* Called with trace_event_read_lock() held. */
|
2010-08-05 14:22:23 +00:00
|
|
|
enum print_line_t print_trace_line(struct trace_iterator *iter)
|
2008-05-12 19:20:47 +00:00
|
|
|
{
|
2015-09-30 13:42:05 +00:00
|
|
|
struct trace_array *tr = iter->tr;
|
|
|
|
unsigned long trace_flags = tr->trace_flags;
|
2008-09-29 18:18:34 +00:00
|
|
|
enum print_line_t ret;
|
|
|
|
|
2014-11-12 15:29:54 +00:00
|
|
|
if (iter->lost_events) {
|
2020-03-17 21:32:32 +00:00
|
|
|
if (iter->lost_events == (unsigned long)-1)
|
|
|
|
trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
|
|
|
|
iter->cpu);
|
|
|
|
else
|
|
|
|
trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
|
|
|
|
iter->cpu, iter->lost_events);
|
2014-11-12 15:29:54 +00:00
|
|
|
if (trace_seq_has_overflowed(&iter->seq))
|
|
|
|
return TRACE_TYPE_PARTIAL_LINE;
|
|
|
|
}
|
2010-03-31 23:49:26 +00:00
|
|
|
|
2008-09-29 18:18:34 +00:00
|
|
|
if (iter->trace && iter->trace->print_line) {
|
|
|
|
ret = iter->trace->print_line(iter);
|
|
|
|
if (ret != TRACE_TYPE_UNHANDLED)
|
|
|
|
return ret;
|
|
|
|
}
|
2008-05-23 19:37:28 +00:00
|
|
|
|
2013-03-09 02:02:34 +00:00
|
|
|
if (iter->ent->type == TRACE_BPUTS &&
|
|
|
|
trace_flags & TRACE_ITER_PRINTK &&
|
|
|
|
trace_flags & TRACE_ITER_PRINTK_MSGONLY)
|
|
|
|
return trace_print_bputs_msg_only(iter);
|
|
|
|
|
2009-03-12 17:24:49 +00:00
|
|
|
if (iter->ent->type == TRACE_BPRINT &&
|
|
|
|
trace_flags & TRACE_ITER_PRINTK &&
|
|
|
|
trace_flags & TRACE_ITER_PRINTK_MSGONLY)
|
2009-03-19 16:20:38 +00:00
|
|
|
return trace_print_bprintk_msg_only(iter);
|
2009-03-12 17:24:49 +00:00
|
|
|
|
2008-12-13 19:18:13 +00:00
|
|
|
if (iter->ent->type == TRACE_PRINT &&
|
|
|
|
trace_flags & TRACE_ITER_PRINTK &&
|
|
|
|
trace_flags & TRACE_ITER_PRINTK_MSGONLY)
|
2009-03-19 16:20:38 +00:00
|
|
|
return trace_print_printk_msg_only(iter);
|
2008-12-13 19:18:13 +00:00
|
|
|
|
2008-05-12 19:20:47 +00:00
|
|
|
if (trace_flags & TRACE_ITER_BIN)
|
|
|
|
return print_bin_fmt(iter);
|
|
|
|
|
2008-05-12 19:20:49 +00:00
|
|
|
if (trace_flags & TRACE_ITER_HEX)
|
|
|
|
return print_hex_fmt(iter);
|
|
|
|
|
2008-05-12 19:20:47 +00:00
|
|
|
if (trace_flags & TRACE_ITER_RAW)
|
|
|
|
return print_raw_fmt(iter);
|
|
|
|
|
|
|
|
return print_trace_fmt(iter);
|
|
|
|
}
|
|
|
|
|
tracing/latency: Fix header output for latency tracers
In case the graph tracer (CONFIG_FUNCTION_GRAPH_TRACER) or even the
function tracer (CONFIG_FUNCTION_TRACER) is not set, the latency tracers
do not display the proper latency header.
The involved/fixed latency tracers are:
wakeup_rt
wakeup
preemptirqsoff
preemptoff
irqsoff
The patch adds proper handling of tracer configuration options for latency
tracers, and displays the correct header info accordingly.
* The current output (for wakeup tracer) with both graph and function
tracers disabled is:
# tracer: wakeup
#
<idle>-0 0d.h5 1us+: 0:120:R + [000] 7: 0:R watchdog/0
<idle>-0 0d.h5 3us+: ttwu_do_activate.clone.1 <-try_to_wake_up
...
* The fixed output is:
# tracer: wakeup
#
# wakeup latency trace v1.1.5 on 3.1.0-tip+
# --------------------------------------------------------------------
# latency: 55 us, #4/4, CPU#0 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:2)
# -----------------
# | task: migration/0-6 (uid:0 nice:0 policy:1 rt_prio:99)
# -----------------
#
# _------=> CPU#
# / _-----=> irqs-off
# | / _----=> need-resched
# || / _---=> hardirq/softirq
# ||| / _--=> preempt-depth
# |||| / delay
# cmd pid ||||| time | caller
# \ / ||||| \ | /
cat-1129 0d..4 1us : 1129:120:R + [000] 6: 0:R migration/0
cat-1129 0d..4 2us+: ttwu_do_activate.clone.1 <-try_to_wake_up
* The current output (for wakeup tracer) with only function
tracer enabled is:
# tracer: wakeup
#
cat-1140 0d..4 1us+: 1140:120:R + [000] 6: 0:R migration/0
cat-1140 0d..4 2us : ttwu_do_activate.clone.1 <-try_to_wake_up
* The fixed output is:
# tracer: wakeup
#
# wakeup latency trace v1.1.5 on 3.1.0-tip+
# --------------------------------------------------------------------
# latency: 207 us, #109/109, CPU#1 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:2)
# -----------------
# | task: watchdog/1-12 (uid:0 nice:0 policy:1 rt_prio:99)
# -----------------
#
# _------=> CPU#
# / _-----=> irqs-off
# | / _----=> need-resched
# || / _---=> hardirq/softirq
# ||| / _--=> preempt-depth
# |||| / delay
# cmd pid ||||| time | caller
# \ / ||||| \ | /
<idle>-0 1d.h5 1us+: 0:120:R + [001] 12: 0:R watchdog/1
<idle>-0 1d.h5 3us : ttwu_do_activate.clone.1 <-try_to_wake_up
Link: http://lkml.kernel.org/r/20111107150849.GE1807@m.brq.redhat.com
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2011-11-07 15:08:49 +00:00
|
|
|
void trace_latency_header(struct seq_file *m)
|
|
|
|
{
|
|
|
|
struct trace_iterator *iter = m->private;
|
2015-09-30 13:42:05 +00:00
|
|
|
struct trace_array *tr = iter->tr;
|
2011-11-07 15:08:49 +00:00
|
|
|
|
|
|
|
/* print nothing if the buffers are empty */
|
|
|
|
if (trace_empty(iter))
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (iter->iter_flags & TRACE_FILE_LAT_FMT)
|
|
|
|
print_trace_header(m, iter);
|
|
|
|
|
2015-09-30 13:42:05 +00:00
|
|
|
if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
|
2011-11-07 15:08:49 +00:00
|
|
|
print_lat_help_header(m);
|
|
|
|
}
|
|
|
|
|
2010-04-02 17:01:22 +00:00
|
|
|
void trace_default_header(struct seq_file *m)
|
|
|
|
{
|
|
|
|
struct trace_iterator *iter = m->private;
|
2015-09-30 13:42:05 +00:00
|
|
|
struct trace_array *tr = iter->tr;
|
|
|
|
unsigned long trace_flags = tr->trace_flags;
|
2010-04-02 17:01:22 +00:00
|
|
|
|
2011-06-03 14:58:49 +00:00
|
|
|
if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
|
|
|
|
return;
|
|
|
|
|
2010-04-02 17:01:22 +00:00
|
|
|
if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
|
|
|
|
/* print nothing if the buffers are empty */
|
|
|
|
if (trace_empty(iter))
|
|
|
|
return;
|
|
|
|
print_trace_header(m, iter);
|
|
|
|
if (!(trace_flags & TRACE_ITER_VERBOSE))
|
|
|
|
print_lat_help_header(m);
|
|
|
|
} else {
|
tracing: Add irq, preempt-count and need resched info to default trace output
People keep asking how to get the preempt count, irq, and need resched info
and we keep telling them to enable the latency format. Some developers think
that traces without this info are completely useless, and for a lot of tasks
they are useless.
The first option was to enable the latency trace as the default format, but
the header for the latency format is pretty useless for most tracers and
it also reports the timestamp in straight microseconds from the time the trace
started. This is sometimes more difficult to read, as the default trace shows
seconds from the start of boot-up.
Latency format:
# tracer: nop
#
# nop latency trace v1.1.5 on 3.2.0-rc1-test+
# --------------------------------------------------------------------
# latency: 0 us, #159771/64234230, CPU#1 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
# -----------------
# | task: -0 (uid:0 nice:0 policy:0 rt_prio:0)
# -----------------
#
# _------=> CPU#
# / _-----=> irqs-off
# | / _----=> need-resched
# || / _---=> hardirq/softirq
# ||| / _--=> preempt-depth
# |||| / delay
# cmd pid ||||| time | caller
# \ / ||||| \ | /
migratio-6 0...2 41778231us+: rcu_note_context_switch <-__schedule
migratio-6 0...2 41778233us : trace_rcu_utilization <-rcu_note_context_switch
migratio-6 0...2 41778235us+: rcu_sched_qs <-rcu_note_context_switch
migratio-6 0d..2 41778236us+: rcu_preempt_qs <-rcu_note_context_switch
migratio-6 0...2 41778238us : trace_rcu_utilization <-rcu_note_context_switch
migratio-6 0...2 41778239us+: debug_lockdep_rcu_enabled <-__schedule
default format:
# tracer: nop
#
# TASK-PID CPU# TIMESTAMP FUNCTION
# | | | | |
migration/0-6 [000] 50.025810: rcu_note_context_switch <-__schedule
migration/0-6 [000] 50.025812: trace_rcu_utilization <-rcu_note_context_switch
migration/0-6 [000] 50.025813: rcu_sched_qs <-rcu_note_context_switch
migration/0-6 [000] 50.025815: rcu_preempt_qs <-rcu_note_context_switch
migration/0-6 [000] 50.025817: trace_rcu_utilization <-rcu_note_context_switch
migration/0-6 [000] 50.025818: debug_lockdep_rcu_enabled <-__schedule
migration/0-6 [000] 50.025820: debug_lockdep_rcu_enabled <-__schedule
The latency format header has latency information that is pretty meaningless
for most tracers. Some of the header is useful though, and we can add that
to the default format later as well.
What is really useful with the latency format is the irqs-off, need-resched
hard/softirq context and the preempt count.
This commit adds the irq-info option, which is on by default and adds this
information:
# tracer: nop
#
# _-----=> irqs-off
# / _----=> need-resched
# | / _---=> hardirq/softirq
# || / _--=> preempt-depth
# ||| / delay
# TASK-PID CPU# |||| TIMESTAMP FUNCTION
# | | | |||| | |
<idle>-0 [000] d..2 49.309305: cpuidle_get_driver <-cpuidle_idle_call
<idle>-0 [000] d..2 49.309307: mwait_idle <-cpu_idle
<idle>-0 [000] d..2 49.309309: need_resched <-mwait_idle
<idle>-0 [000] d..2 49.309310: test_ti_thread_flag <-need_resched
<idle>-0 [000] d..2 49.309312: trace_power_start.constprop.13 <-mwait_idle
<idle>-0 [000] d..2 49.309313: trace_cpu_idle <-mwait_idle
<idle>-0 [000] d..2 49.309315: need_resched <-mwait_idle
If a user wants the old format, they can disable the 'irq-info' option:
# tracer: nop
#
# TASK-PID CPU# TIMESTAMP FUNCTION
# | | | | |
<idle>-0 [000] 49.309305: cpuidle_get_driver <-cpuidle_idle_call
<idle>-0 [000] 49.309307: mwait_idle <-cpu_idle
<idle>-0 [000] 49.309309: need_resched <-mwait_idle
<idle>-0 [000] 49.309310: test_ti_thread_flag <-need_resched
<idle>-0 [000] 49.309312: trace_power_start.constprop.13 <-mwait_idle
<idle>-0 [000] 49.309313: trace_cpu_idle <-mwait_idle
<idle>-0 [000] 49.309315: need_resched <-mwait_idle
Requested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2011-11-17 14:34:33 +00:00
|
|
|
if (!(trace_flags & TRACE_ITER_VERBOSE)) {
|
|
|
|
if (trace_flags & TRACE_ITER_IRQ_INFO)
|
2020-01-09 23:53:48 +00:00
|
|
|
print_func_help_header_irq(iter->array_buffer,
|
2017-06-26 05:38:43 +00:00
|
|
|
m, trace_flags);
|
2011-11-17 14:34:33 +00:00
|
|
|
else
|
2020-01-09 23:53:48 +00:00
|
|
|
print_func_help_header(iter->array_buffer, m,
|
2017-06-26 05:38:43 +00:00
|
|
|
trace_flags);
|
2011-11-17 14:34:33 +00:00
|
|
|
}
|
2010-04-02 17:01:22 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-09-30 01:26:16 +00:00
|
|
|
static void test_ftrace_alive(struct seq_file *m)
|
|
|
|
{
|
|
|
|
if (!ftrace_is_dead())
|
|
|
|
return;
|
2014-11-08 20:42:11 +00:00
|
|
|
seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
|
|
|
|
"# MAY BE MISSING FUNCTION EVENTS\n");
|
2011-09-30 01:26:16 +00:00
|
|
|
}
|
|
|
|
|
2013-03-05 15:25:16 +00:00
|
|
|
#ifdef CONFIG_TRACER_MAX_TRACE
|
2013-03-05 19:35:11 +00:00
|
|
|
static void show_snapshot_main_help(struct seq_file *m)
|
2013-03-05 15:25:16 +00:00
|
|
|
{
|
2014-11-08 20:42:11 +00:00
|
|
|
seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
|
|
|
|
"# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
|
|
|
|
"# Takes a snapshot of the main buffer.\n"
|
|
|
|
"# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
|
|
|
|
"# (Doesn't have to be '2' works with any number that\n"
|
|
|
|
"# is not a '0' or '1')\n");
|
2013-03-05 15:25:16 +00:00
|
|
|
}
|
2013-03-05 19:35:11 +00:00
|
|
|
|
|
|
|
static void show_snapshot_percpu_help(struct seq_file *m)
|
|
|
|
{
|
2014-11-08 20:42:10 +00:00
|
|
|
seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
|
2013-03-05 19:35:11 +00:00
|
|
|
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
|
2014-11-08 20:42:11 +00:00
|
|
|
seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
|
|
|
|
"# Takes a snapshot of the main buffer for this cpu.\n");
|
2013-03-05 19:35:11 +00:00
|
|
|
#else
|
2014-11-08 20:42:11 +00:00
|
|
|
seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
|
|
|
|
"# Must use main snapshot file to allocate.\n");
|
2013-03-05 19:35:11 +00:00
|
|
|
#endif
|
2014-11-08 20:42:11 +00:00
|
|
|
seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
|
|
|
|
"# (Doesn't have to be '2' works with any number that\n"
|
|
|
|
"# is not a '0' or '1')\n");
|
2013-03-05 19:35:11 +00:00
|
|
|
}
|
|
|
|
|
2013-03-05 15:25:16 +00:00
|
|
|
static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
|
|
|
|
{
|
2013-03-05 23:25:02 +00:00
|
|
|
if (iter->tr->allocated_snapshot)
|
2014-11-08 20:42:10 +00:00
|
|
|
seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
|
2013-03-05 15:25:16 +00:00
|
|
|
else
|
2014-11-08 20:42:10 +00:00
|
|
|
seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
|
2013-03-05 15:25:16 +00:00
|
|
|
|
2014-11-08 20:42:10 +00:00
|
|
|
seq_puts(m, "# Snapshot commands:\n");
|
2013-03-05 19:35:11 +00:00
|
|
|
if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
|
|
|
|
show_snapshot_main_help(m);
|
|
|
|
else
|
|
|
|
show_snapshot_percpu_help(m);
|
2013-03-05 15:25:16 +00:00
|
|
|
}
|
|
|
|
#else
|
|
|
|
/* Should never be called */
|
|
|
|
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
|
|
|
|
#endif
|
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
static int s_show(struct seq_file *m, void *v)
|
|
|
|
{
|
|
|
|
struct trace_iterator *iter = v;
|
2009-12-07 14:11:39 +00:00
|
|
|
int ret;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
|
|
|
if (iter->ent == NULL) {
|
|
|
|
if (iter->tr) {
|
|
|
|
seq_printf(m, "# tracer: %s\n", iter->trace->name);
|
|
|
|
seq_puts(m, "#\n");
|
2011-09-30 01:26:16 +00:00
|
|
|
test_ftrace_alive(m);
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
2013-03-05 15:25:16 +00:00
|
|
|
if (iter->snapshot && trace_empty(iter))
|
|
|
|
print_snapshot_help(m, iter);
|
|
|
|
else if (iter->trace && iter->trace->print_header)
|
2008-11-25 08:12:31 +00:00
|
|
|
iter->trace->print_header(m);
|
2010-04-02 17:01:22 +00:00
|
|
|
else
|
|
|
|
trace_default_header(m);
|
|
|
|
|
2009-12-07 14:11:39 +00:00
|
|
|
} else if (iter->leftover) {
|
|
|
|
/*
|
|
|
|
* If we filled the seq_file buffer earlier, we
|
|
|
|
* want to just show it now.
|
|
|
|
*/
|
|
|
|
ret = trace_print_seq(m, &iter->seq);
|
|
|
|
|
|
|
|
/* ret should this time be zero, but you never know */
|
|
|
|
iter->leftover = ret;
|
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
} else {
|
2008-05-12 19:20:47 +00:00
|
|
|
print_trace_line(iter);
|
2009-12-07 14:11:39 +00:00
|
|
|
ret = trace_print_seq(m, &iter->seq);
|
|
|
|
/*
|
|
|
|
* If we overflow the seq_file buffer, then it will
|
|
|
|
* ask us for this data again at start up.
|
|
|
|
* Use that instead.
|
|
|
|
* ret is 0 if seq_file write succeeded.
|
|
|
|
* -1 otherwise.
|
|
|
|
*/
|
|
|
|
iter->leftover = ret;
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
tracing: Introduce trace_create_cpu_file() and tracing_get_cpu()
Every "file_operations" used by tracing_init_debugfs_percpu is buggy.
f_op->open/etc does:
1. struct trace_cpu *tc = inode->i_private;
struct trace_array *tr = tc->tr;
2. trace_array_get(tr) or fail;
3. do_something(tc);
But tc (and tr) can already be freed before trace_array_get() is called.
And it doesn't matter whether this file is per-cpu or it was created by
init_tracer_debugfs(); free_percpu() or kfree() are equally bad.
Note that even 1. is not safe: the freed memory can be unmapped. But even
if it were safe, trace_array_get() can wrongly succeed if we also race with
the next new_instance_create(), which can re-allocate the same tr, or if tc
was overwritten and ->tr points to a valid tr. In this case 3. uses the
freed/reused memory.
Add a new trivial helper, trace_create_cpu_file(), which simply calls
trace_create_file() and encodes "cpu" in "struct inode". Another helper,
tracing_get_cpu(), will be used to read cpu_nr-or-RING_BUFFER_ALL_CPUS.
The patch abuses ->i_cdev to encode the number; it is never used unless
the file is S_ISCHR(). But we could use something else, say, i_bytes or
even ->d_fsdata. In any case this hack is hidden inside these 2 helpers;
it would be trivial to change them if needed.
This patch only changes tracing_init_debugfs_percpu() to use the new
trace_create_cpu_file(); the next patches will change file_operations.
Note: tracing_get_cpu(inode) is always safe, but you can't trust the
result unless trace_array_get() was called; without trace_types_lock,
which acts as a barrier, it can wrongly return RING_BUFFER_ALL_CPUS.
Link: http://lkml.kernel.org/r/20130723152554.GA23710@redhat.com
Cc: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2013-07-23 15:25:54 +00:00
|
|
|
/*
|
|
|
|
* Should be used after trace_array_get(), trace_types_lock
|
|
|
|
* ensures that i_cdev was already initialized.
|
|
|
|
*/
|
|
|
|
static inline int tracing_get_cpu(struct inode *inode)
|
|
|
|
{
|
|
|
|
if (inode->i_cdev) /* See trace_create_cpu_file() */
|
|
|
|
return (long)inode->i_cdev - 1;
|
|
|
|
return RING_BUFFER_ALL_CPUS;
|
|
|
|
}
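(For orientation: the trace_create_cpu_file() commit message above describes the
encoding side of this trick. Below is a rough sketch of what that helper could
look like, reconstructed only from the decode logic in tracing_get_cpu() above;
the exact signature and dentry handling are assumptions, and the authoritative
version lives elsewhere in this file.)

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	/* Hedged sketch: create the file, then stash cpu + 1 in ->i_cdev so
	 * that a zero i_cdev keeps meaning "no cpu encoded", i.e. the decode
	 * in tracing_get_cpu() falls back to RING_BUFFER_ALL_CPUS. */
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		d_inode(ret)->i_cdev = (void *)(cpu + 1);

	return ret;
}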
|
|
|
|
|
2009-09-22 23:43:43 +00:00
|
|
|
static const struct seq_operations tracer_seq_ops = {
|
2008-05-12 19:20:46 +00:00
|
|
|
.start = s_start,
|
|
|
|
.next = s_next,
|
|
|
|
.stop = s_stop,
|
|
|
|
.show = s_show,
|
2008-05-12 19:20:42 +00:00
|
|
|
};
|
|
|
|
|
2008-05-12 19:20:51 +00:00
|
|
|
static struct trace_iterator *
|
2013-07-23 15:26:10 +00:00
|
|
|
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
|
2008-05-12 19:20:42 +00:00
|
|
|
{
|
2013-07-23 15:26:10 +00:00
|
|
|
struct trace_array *tr = inode->i_private;
|
2008-05-12 19:20:42 +00:00
|
|
|
struct trace_iterator *iter;
|
2012-04-25 08:23:39 +00:00
|
|
|
int cpu;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2009-02-27 05:12:38 +00:00
|
|
|
if (tracing_disabled)
|
|
|
|
return ERR_PTR(-ENODEV);
|
2008-05-12 19:20:44 +00:00
|
|
|
|
2012-04-25 08:23:39 +00:00
|
|
|
iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
|
2009-02-27 05:12:38 +00:00
|
|
|
if (!iter)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2015-06-09 07:32:35 +00:00
|
|
|
iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
|
2012-06-28 00:46:14 +00:00
|
|
|
GFP_KERNEL);
|
2012-07-11 06:35:08 +00:00
|
|
|
if (!iter->buffer_iter)
|
|
|
|
goto release;
|
|
|
|
|
2020-03-17 21:32:23 +00:00
|
|
|
/*
|
|
|
|
* trace_find_next_entry() may need to save off iter->ent.
|
|
|
|
* It will place it into the iter->temp buffer. As most
|
|
|
|
* events are less than 128, allocate a buffer of that size.
|
|
|
|
* If one is greater, then trace_find_next_entry() will
|
|
|
|
* allocate a new buffer to adjust for the bigger iter->ent.
|
|
|
|
* It's not critical if it fails to get allocated here.
|
|
|
|
*/
|
|
|
|
iter->temp = kmalloc(128, GFP_KERNEL);
|
|
|
|
if (iter->temp)
|
|
|
|
iter->temp_size = 128;
|
|
|
|
|
tracing/core: make the read callbacks reentrants
Now that several per-cpu files can be read or spliced at the
same time, we want the read/splice callbacks for tracing files to be
reentrant.
Until now, a single global mutex (trace_types_lock) serialized
the access to tracing_read_pipe(), tracing_splice_read_pipe(),
and the seq helpers.
I.e., it means that if a user tries to read trace_pipe0 and
trace_pipe1 at the same time, the access to the function
tracing_read_pipe() is contended and one reader must wait for
the other to finish its read call.
The trace_types_lock mutex is mostly here to serialize the access
to the global current tracer (current_trace), which can be
changed concurrently. Although the iter struct keeps a private
pointer to this tracer, its callbacks can be changed by another
function.
The method used here is to no longer keep a private reference to
the tracer inside the iterator but to make a copy of it inside
the iterator. Then it checks on subsequent read calls if the
tracer has changed. This is not costly because the current
tracer is not expected to be changed often, so we use branch
prediction for that.
Moreover, we add a private mutex to the iterator (there is one
iterator per file descriptor) to serialize the accesses in case
of multiple consumers per file descriptor (which would be a
silly idea from the user). Note that this is not to protect the
ring buffer, since the ring buffer already serializes the
readers' accesses. This is to prevent trace weirdness in
case of concurrent consumers. But these mutexes can be dropped
anyway; that would not result in any crash. Just tell me what
you think about it.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-02-25 05:13:16 +00:00
|
|
|
/*
|
|
|
|
* We make a copy of the current tracer to avoid concurrent
|
|
|
|
* changes on it while we are reading.
|
|
|
|
*/
|
2008-05-12 19:20:42 +00:00
|
|
|
mutex_lock(&trace_types_lock);
|
2009-02-25 05:13:16 +00:00
|
|
|
iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
|
2009-02-27 05:12:38 +00:00
|
|
|
if (!iter->trace)
|
2009-02-25 05:13:16 +00:00
|
|
|
goto fail;
|
2009-02-27 05:12:38 +00:00
|
|
|
|
2012-05-11 17:29:49 +00:00
|
|
|
*iter->trace = *tr->current_trace;
|
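(The "read callbacks reentrant" commit message quoted earlier explains that this
private copy is refreshed on later reads if the global tracer changes. A minimal
sketch of that re-check, roughly as it would sit at the top of a read/splice
callback, follows; the name-pointer comparison and the unlikely() hint are
assumptions about the exact form.)

	/* Sketch: refresh the iterator's private tracer copy if the global
	 * current tracer was switched since the last read. The global lock is
	 * only held for the duration of the copy, keeping readers reentrant. */
	mutex_lock(&trace_types_lock);
	if (unlikely(iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);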
2009-02-25 05:13:16 +00:00
|
|
|
|
2009-06-15 06:58:26 +00:00
|
|
|
if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
|
2009-04-01 20:53:08 +00:00
|
|
|
goto fail;
|
|
|
|
|
tracing: Consolidate max_tr into main trace_array structure
Currently, the way the latency tracers and the snapshot feature work
is to have a separate trace_array called "max_tr" that holds the
snapshot buffer. For latency tracers, this snapshot buffer is used
to swap the running buffer with this buffer to save the current max
latency.
The only items needed for the max_tr are really just a copy of the buffer
itself, the per_cpu data pointers, the time_start timestamp that states
when the max latency was triggered, and the cpu that the max latency
was triggered on. All other fields in trace_array are unused by the
max_tr, making the max_tr mostly bloat.
This change removes the max_tr completely, and adds a new structure
called trace_buffer, which holds the buffer pointer, the per_cpu data
pointers, the time_start timestamp, and the cpu where the latency occurred.
The trace_array now has two trace_buffers: one for the normal trace and
one for the max trace or snapshot. By doing this, not only do we remove
the bloat from the max_tr, but trace instances can now use
their own snapshot feature instead of only the top-level global_trace
having the snapshot feature and latency tracers for itself.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2013-03-05 14:24:35 +00:00
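(The commit message above describes the new per-buffer container. As a rough
sketch only: using the array_buffer name seen elsewhere in this file, such a
structure boils down to the fields below; the authoritative definition is in
trace.h and may differ.)

struct array_buffer {
	struct trace_array		*tr;		/* owning trace array */
	struct trace_buffer		*buffer;	/* the ring buffer itself */
	struct trace_array_cpu __percpu	*data;		/* per-cpu bookkeeping */
	u64				time_start;	/* when the max latency was hit */
	int				cpu;		/* cpu that triggered the max latency */
};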
|
|
|
iter->tr = tr;
|
|
|
|
|
|
|
|
#ifdef CONFIG_TRACER_MAX_TRACE
|
2012-05-11 17:29:49 +00:00
|
|
|
/* Currently only the top directory has a snapshot */
|
|
|
|
if (tr->current_trace->print_max || snapshot)
|
2020-01-09 23:53:48 +00:00
|
|
|
iter->array_buffer = &tr->max_buffer;
|
2008-05-12 19:20:42 +00:00
|
|
|
else
|
2013-03-05 14:24:35 +00:00
|
|
|
#endif
|
2020-01-09 23:53:48 +00:00
|
|
|
iter->array_buffer = &tr->array_buffer;
|
2012-12-26 02:53:00 +00:00
|
|
|
iter->snapshot = snapshot;
|
2008-05-12 19:20:42 +00:00
|
|
|
iter->pos = -1;
|
2013-07-23 15:26:10 +00:00
|
|
|
iter->cpu_file = tracing_get_cpu(inode);
|
2009-02-25 05:13:16 +00:00
|
|
|
mutex_init(&iter->mutex);
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2008-11-25 08:12:31 +00:00
|
|
|
/* Notify the tracer early; before we stop tracing. */
|
2014-11-22 18:30:12 +00:00
|
|
|
if (iter->trace->open)
|
2008-12-11 12:53:26 +00:00
|
|
|
iter->trace->open(iter);
|
2008-11-25 08:12:31 +00:00
|
|
|
|
2008-11-12 22:52:38 +00:00
|
|
|
/* Annotate start of buffers if we had overruns */
|
2020-01-09 23:53:48 +00:00
|
|
|
if (ring_buffer_overruns(iter->array_buffer->buffer))
|
2008-11-12 22:52:38 +00:00
|
|
|
iter->iter_flags |= TRACE_FILE_ANNOTATE;
|
|
|
|
|
2012-11-13 20:18:22 +00:00
|
|
|
/* Output in nanoseconds only if we are using a clock in nanoseconds. */
|
2013-04-23 01:32:39 +00:00
|
|
|
if (trace_clocks[tr->clock_id].in_ns)
|
2012-11-13 20:18:22 +00:00
|
|
|
iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
|
|
|
|
|
2020-03-17 21:32:31 +00:00
|
|
|
/*
|
|
|
|
* If pause-on-trace is enabled, then stop the trace while
|
|
|
|
* dumping, unless this is the "snapshot" file
|
|
|
|
*/
|
|
|
|
if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
|
2012-05-11 17:29:49 +00:00
|
|
|
tracing_stop_tr(tr);
|
2009-09-01 15:06:29 +00:00
|
|
|
|
2013-01-23 20:22:59 +00:00
|
|
|
if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
|
tracing/core: introduce per cpu tracing files
Impact: split up tracing output per cpu
Currently, in the tracing debugfs directory, three files are
available to let the user extract the trace output:
- trace is an iterator through the ring buffer. It's a reader
but not a consumer; it doesn't block when no more traces are
available.
- trace, pretty similar to the former, except that it adds more
information such as preempt count, irq flags, ...
- trace_pipe is a reader and a consumer; it will also block
waiting for traces if necessary (heh, yes, it's a pipe).
The traces coming from different cpus are currently mixed up
inside these files. Sometimes that garbles the information,
sometimes it's useful, depending on what the tracer captures.
The tracing_cpumask file is useful to filter the output and
select only the traces captured on a custom-defined set of cpus.
But it is still not powerful enough to extract one trace buffer
per cpu at the same time.
So this patch creates a new directory: /debug/tracing/per_cpu/.
Inside this directory, you will now find one trace_pipe file and
one trace file per cpu.
Which means that if you have two cpus, you will have:
trace0
trace1
trace_pipe0
trace_pipe1
And of course, reading these files has the same effect as
reading the usual tracing files, except that you will only see
the traces from the given cpu.
The original all-in-one-cpu trace files are still available in
their original place.
Until now, only one consumer was allowed on trace_pipe to avoid
racy consumption of the ring buffer. Now the approach has
changed a bit: you can have only one consumer per cpu.
Which means you are allowed to read trace_pipe0 and trace_pipe1
concurrently, but you can't have two readers on trace_pipe0 or
trace_pipe1.
Following the same logic, if there is one reader on the common
trace_pipe, you cannot at the same time have another reader on
trace_pipe0 or trace_pipe1, because trace_pipe is in essence
already a consumer of all cpu buffers.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-02-25 02:22:28 +00:00
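For context, a per-cpu pipe can be consumed from user space like any other file. The sketch below is hedged: it assumes a tracefs mount at /sys/kernel/tracing and the modern per_cpu/cpuN/trace_pipe layout; the exact path depends on the kernel version and mount point (the commit above used /debug/tracing with trace_pipe0-style names).

/* Hedged example: consume one CPU's trace pipe from user space.
 * The path below is an assumption about the tracefs layout.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/sys/kernel/tracing/per_cpu/cpu0/trace_pipe";
	char line[4096];
	FILE *fp = fopen(path, "r");

	if (!fp) {
		perror(path);
		return EXIT_FAILURE;
	}
	/* A consuming, blocking read: lines are removed from the buffer. */
	while (fgets(line, sizeof(line), fp))
		fputs(line, stdout);
	fclose(fp);
	return 0;
}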
|
|
|
for_each_tracing_cpu(cpu) {
|
|
|
|
iter->buffer_iter[cpu] =
|
2020-01-09 23:53:48 +00:00
|
|
|
ring_buffer_read_prepare(iter->array_buffer->buffer,
|
2019-03-08 19:32:04 +00:00
|
|
|
cpu, GFP_KERNEL);
|
2010-04-20 22:47:11 +00:00
|
|
|
}
|
|
|
|
ring_buffer_read_prepare_sync();
|
|
|
|
for_each_tracing_cpu(cpu) {
|
|
|
|
ring_buffer_read_start(iter->buffer_iter[cpu]);
|
2009-09-01 15:06:29 +00:00
|
|
|
tracing_iter_reset(iter, cpu);
|
tracing/core: introduce per cpu tracing files
2009-02-25 02:22:28 +00:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
cpu = iter->cpu_file;
|
2008-09-30 03:02:41 +00:00
|
|
|
iter->buffer_iter[cpu] =
|
2020-01-09 23:53:48 +00:00
|
|
|
ring_buffer_read_prepare(iter->array_buffer->buffer,
|
2019-03-08 19:32:04 +00:00
|
|
|
cpu, GFP_KERNEL);
|
2010-04-20 22:47:11 +00:00
|
|
|
ring_buffer_read_prepare_sync();
|
|
|
|
ring_buffer_read_start(iter->buffer_iter[cpu]);
|
2009-09-01 15:06:29 +00:00
|
|
|
tracing_iter_reset(iter, cpu);
|
2008-09-30 03:02:41 +00:00
|
|
|
}
|
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
mutex_unlock(&trace_types_lock);
|
|
|
|
|
|
|
|
return iter;
|
2008-09-30 03:02:41 +00:00
|
|
|
|
tracing/core: make the read callbacks reentrants
2009-02-25 05:13:16 +00:00
|
|
|
fail:
|
2008-09-30 03:02:41 +00:00
|
|
|
mutex_unlock(&trace_types_lock);
|
tracing/core: make the read callbacks reentrants
2009-02-25 05:13:16 +00:00
|
|
|
kfree(iter->trace);
|
2020-03-17 21:32:23 +00:00
|
|
|
kfree(iter->temp);
|
2012-06-28 00:46:14 +00:00
|
|
|
kfree(iter->buffer_iter);
|
2012-07-11 06:35:08 +00:00
|
|
|
release:
|
2012-04-25 08:23:39 +00:00
|
|
|
seq_release_private(inode, file);
|
|
|
|
return ERR_PTR(-ENOMEM);
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int tracing_open_generic(struct inode *inode, struct file *filp)
|
|
|
|
{
|
tracing: Add tracing_check_open_get_tr()
Currently, most files in the tracefs directory test if tracing_disabled is
set and, if so, return -ENODEV. tracing_disabled is set when tracing is
found to be broken. Originally this was done in case the ring buffer was
found to be corrupted, and we wanted to prevent reading it from crashing
the kernel. But it is also set if a tracing selftest fails on boot. It's a
one-way switch; that is, once it is triggered, tracing is disabled until
reboot.
As most tracefs files can also be used by instances in the tracefs
directory, they need to be handled carefully. Each instance has a
trace_array associated with it, and when the instance is removed, the
trace_array is freed. But if a file is opened with a reference to the
trace_array, the open needs to look up the trace_array to take its ref
counter (as there could be a race between it being deleted and the open
itself). Once it is found, a reference is added to prevent the instance
from being removed (and the trace_array associated with it freed).
Combine the two checks (tracing_disabled and trace_array_get()) into a
single helper function. This will also make it easier to add lockdown to
tracefs later.
Link: http://lkml.kernel.org/r/20191011135458.7399da44@gandalf.local.home
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
2019-10-11 21:39:57 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = tracing_check_open_get_tr(NULL);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2008-05-12 19:20:44 +00:00
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
filp->private_data = inode->i_private;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
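The combined check described in the commit message above boils down to two steps: bail out if tracing has been disabled, otherwise pin the trace_array (if one was passed). A rough sketch of that shape, relying on the tracing_disabled flag and trace_array_get() visible in this file, and clearly not the verbatim kernel implementation:

/* Sketch only (assumed shape, simplified name): refuse the open if
 * tracing is disabled, then take a reference on the trace_array when
 * one was supplied, so it cannot be freed while the file is open.
 * Assumes the surrounding kernel declarations in this file.
 */
static int check_open_get_tr_sketch(struct trace_array *tr)
{
	if (tracing_disabled)
		return -ENODEV;

	if (tr && trace_array_get(tr) < 0)
		return -ENODEV;

	return 0;
}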
2013-10-19 00:15:54 +00:00
|
|
|
bool tracing_is_disabled(void)
|
|
|
|
{
|
|
|
|
return (tracing_disabled) ? true: false;
|
|
|
|
}
|
|
|
|
|
2013-07-02 03:34:22 +00:00
|
|
|
/*
|
|
|
|
* Open and update trace_array ref count.
|
|
|
|
* Must have the current trace_array passed to it.
|
|
|
|
*/
|
2019-10-11 23:12:21 +00:00
|
|
|
int tracing_open_generic_tr(struct inode *inode, struct file *filp)
|
2013-07-02 03:34:22 +00:00
|
|
|
{
|
|
|
|
struct trace_array *tr = inode->i_private;
|
tracing: Add tracing_check_open_get_tr()
2019-10-11 21:39:57 +00:00
|
|
|
int ret;
|
2013-07-02 03:34:22 +00:00
|
|
|
|
tracing: Add tracing_check_open_get_tr()
2019-10-11 21:39:57 +00:00
|
|
|
ret = tracing_check_open_get_tr(tr);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2013-07-02 03:34:22 +00:00
|
|
|
|
|
|
|
filp->private_data = inode->i_private;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-02-10 18:44:12 +00:00
|
|
|
static int tracing_release(struct inode *inode, struct file *file)
|
2008-05-12 19:20:42 +00:00
|
|
|
{
|
2013-07-23 15:26:10 +00:00
|
|
|
struct trace_array *tr = inode->i_private;
|
2010-09-28 02:04:53 +00:00
|
|
|
struct seq_file *m = file->private_data;
|
2009-03-18 14:40:24 +00:00
|
|
|
struct trace_iterator *iter;
|
2008-09-30 03:02:41 +00:00
|
|
|
int cpu;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2013-07-02 02:50:29 +00:00
|
|
|
if (!(file->f_mode & FMODE_READ)) {
|
2013-07-23 15:26:10 +00:00
|
|
|
trace_array_put(tr);
|
2009-03-18 14:40:24 +00:00
|
|
|
return 0;
|
2013-07-02 02:50:29 +00:00
|
|
|
}
|
2009-03-18 14:40:24 +00:00
|
|
|
|
2013-07-23 15:26:10 +00:00
|
|
|
/* Writes do not use seq_file */
|
2009-03-18 14:40:24 +00:00
|
|
|
iter = m->private;
|
2008-05-12 19:20:42 +00:00
|
|
|
mutex_lock(&trace_types_lock);
|
2013-03-06 20:27:24 +00:00
|
|
|
|
2008-09-30 03:02:41 +00:00
|
|
|
for_each_tracing_cpu(cpu) {
|
|
|
|
if (iter->buffer_iter[cpu])
|
|
|
|
ring_buffer_read_finish(iter->buffer_iter[cpu]);
|
|
|
|
}
|
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
if (iter->trace && iter->trace->close)
|
|
|
|
iter->trace->close(iter);
|
|
|
|
|
2020-03-17 21:32:31 +00:00
|
|
|
if (!iter->snapshot && tr->stop_count)
|
2012-12-26 02:53:00 +00:00
|
|
|
/* reenable tracing if it was previously enabled */
|
2012-05-11 17:29:49 +00:00
|
|
|
tracing_start_tr(tr);
|
2013-07-18 18:18:44 +00:00
|
|
|
|
|
|
|
__trace_array_put(tr);
|
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
mutex_unlock(&trace_types_lock);
|
|
|
|
|
tracing/core: make the read callbacks reentrants
2009-02-25 05:13:16 +00:00
|
|
|
mutex_destroy(&iter->mutex);
|
2009-04-01 20:53:08 +00:00
|
|
|
free_cpumask_var(iter->started);
|
2020-03-17 21:32:23 +00:00
|
|
|
kfree(iter->temp);
|
tracing/core: make the read callbacks reentrants
2009-02-25 05:13:16 +00:00
|
|
|
kfree(iter->trace);
|
2012-06-28 00:46:14 +00:00
|
|
|
kfree(iter->buffer_iter);
|
2012-04-25 08:23:39 +00:00
|
|
|
seq_release_private(inode, file);
|
2013-07-02 02:50:29 +00:00
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-07-02 03:34:22 +00:00
|
|
|
static int tracing_release_generic_tr(struct inode *inode, struct file *file)
|
|
|
|
{
|
|
|
|
struct trace_array *tr = inode->i_private;
|
|
|
|
|
|
|
|
trace_array_put(tr);
|
2008-05-12 19:20:42 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-07-02 03:34:22 +00:00
|
|
|
static int tracing_single_release_tr(struct inode *inode, struct file *file)
|
|
|
|
{
|
|
|
|
struct trace_array *tr = inode->i_private;
|
|
|
|
|
|
|
|
trace_array_put(tr);
|
|
|
|
|
|
|
|
return single_release(inode, file);
|
|
|
|
}
|
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
static int tracing_open(struct inode *inode, struct file *file)
|
|
|
|
{
|
2013-07-23 15:26:10 +00:00
|
|
|
struct trace_array *tr = inode->i_private;
|
2009-02-27 05:12:38 +00:00
|
|
|
struct trace_iterator *iter;
|
tracing: Add tracing_check_open_get_tr()
2019-10-11 21:39:57 +00:00
|
|
|
int ret;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
tracing: Add tracing_check_open_get_tr()
2019-10-11 21:39:57 +00:00
|
|
|
ret = tracing_check_open_get_tr(tr);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2013-07-02 02:50:29 +00:00
|
|
|
|
2009-03-18 14:40:24 +00:00
|
|
|
/* If this file was open for write, then erase contents */
|
2013-07-23 15:26:10 +00:00
|
|
|
if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
|
|
|
|
int cpu = tracing_get_cpu(inode);
|
2020-01-09 23:53:48 +00:00
|
|
|
struct array_buffer *trace_buf = &tr->array_buffer;
|
2017-09-18 17:03:35 +00:00
|
|
|
|
|
|
|
#ifdef CONFIG_TRACER_MAX_TRACE
|
|
|
|
if (tr->current_trace->print_max)
|
|
|
|
trace_buf = &tr->max_buffer;
|
|
|
|
#endif
|
2013-07-23 15:26:10 +00:00
|
|
|
|
|
|
|
if (cpu == RING_BUFFER_ALL_CPUS)
|
2017-09-18 17:03:35 +00:00
|
|
|
tracing_reset_online_cpus(trace_buf);
|
2009-03-18 14:40:24 +00:00
|
|
|
else
|
2019-08-13 16:14:35 +00:00
|
|
|
tracing_reset_cpu(trace_buf, cpu);
|
2009-03-18 14:40:24 +00:00
|
|
|
}
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2009-03-18 14:40:24 +00:00
|
|
|
if (file->f_mode & FMODE_READ) {
|
2013-07-23 15:26:10 +00:00
|
|
|
iter = __tracing_open(inode, file, false);
|
2009-03-18 14:40:24 +00:00
|
|
|
if (IS_ERR(iter))
|
|
|
|
ret = PTR_ERR(iter);
|
2015-09-30 13:42:05 +00:00
|
|
|
else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
|
2009-03-18 14:40:24 +00:00
|
|
|
iter->iter_flags |= TRACE_FILE_LAT_FMT;
|
|
|
|
}
|
2013-07-02 02:50:29 +00:00
|
|
|
|
|
|
|
if (ret < 0)
|
|
|
|
trace_array_put(tr);
|
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
return ret;
|
|
|
|
}
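The O_TRUNC path above is what makes `echo > trace` clear the buffer; the same thing can be done programmatically. A small hedged example follows (the path is an assumption about where tracefs is mounted):

/* Clear the trace buffer by opening the "trace" file for write with
 * O_TRUNC, mirroring what `echo > trace` does.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/trace", O_WRONLY | O_TRUNC);

	if (fd < 0) {
		perror("open trace");
		return 1;
	}
	close(fd);
	return 0;
}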
|
|
|
|
|
2013-11-07 03:42:48 +00:00
|
|
|
/*
|
|
|
|
* Some tracers are not suitable for instance buffers.
|
|
|
|
* A tracer is always available for the global array (toplevel)
|
|
|
|
* or if it explicitly states that it is.
|
|
|
|
*/
|
|
|
|
static bool
|
|
|
|
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
|
|
|
|
{
|
|
|
|
return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Find the next tracer that this trace array may use */
|
|
|
|
static struct tracer *
|
|
|
|
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
|
|
|
|
{
|
|
|
|
while (t && !trace_ok_for_array(t, tr))
|
|
|
|
t = t->next;
|
|
|
|
|
|
|
|
return t;
|
|
|
|
}
|
|
|
|
|
2008-05-12 19:20:51 +00:00
|
|
|
static void *
|
2008-05-12 19:20:42 +00:00
|
|
|
t_next(struct seq_file *m, void *v, loff_t *pos)
|
|
|
|
{
|
2013-11-07 03:42:48 +00:00
|
|
|
struct trace_array *tr = m->private;
|
2009-06-24 01:53:44 +00:00
|
|
|
struct tracer *t = v;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
|
|
|
(*pos)++;
|
|
|
|
|
|
|
|
if (t)
|
2013-11-07 03:42:48 +00:00
|
|
|
t = get_tracer_for_array(tr, t->next);
|
2008-05-12 19:20:42 +00:00
|
|
|
|
|
|
|
return t;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void *t_start(struct seq_file *m, loff_t *pos)
|
|
|
|
{
|
2013-11-07 03:42:48 +00:00
|
|
|
struct trace_array *tr = m->private;
|
2009-06-24 01:53:44 +00:00
|
|
|
struct tracer *t;
|
2008-05-12 19:20:42 +00:00
|
|
|
loff_t l = 0;
|
|
|
|
|
|
|
|
mutex_lock(&trace_types_lock);
|
2013-11-07 03:42:48 +00:00
|
|
|
|
|
|
|
t = get_tracer_for_array(tr, trace_types);
|
|
|
|
for (; t && l < *pos; t = t_next(m, t, &l))
|
|
|
|
;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
|
|
|
return t;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void t_stop(struct seq_file *m, void *p)
|
|
|
|
{
|
|
|
|
mutex_unlock(&trace_types_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int t_show(struct seq_file *m, void *v)
|
|
|
|
{
|
|
|
|
struct tracer *t = v;
|
|
|
|
|
|
|
|
if (!t)
|
|
|
|
return 0;
|
|
|
|
|
2014-11-08 20:42:10 +00:00
|
|
|
seq_puts(m, t->name);
|
2008-05-12 19:20:42 +00:00
|
|
|
if (t->next)
|
|
|
|
seq_putc(m, ' ');
|
|
|
|
else
|
|
|
|
seq_putc(m, '\n');
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-09-22 23:43:43 +00:00
|
|
|
static const struct seq_operations show_traces_seq_ops = {
|
2008-05-12 19:20:46 +00:00
|
|
|
.start = t_start,
|
|
|
|
.next = t_next,
|
|
|
|
.stop = t_stop,
|
|
|
|
.show = t_show,
|
2008-05-12 19:20:42 +00:00
|
|
|
};
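The t_start/t_next/t_stop/t_show quartet above follows the usual seq_file protocol: start positions the cursor, next advances it, show prints one element, and stop releases any lock taken by start. A tiny user-space analogue of that driving loop (simplified assumptions, not the seq_file core itself):

#include <stdio.h>

static const char *tracers[] = { "nop", "function", "function_graph" };
#define NR_TRACERS (sizeof(tracers) / sizeof(tracers[0]))

/* start: position the cursor and return the element at *pos */
static const void *t_start(long *pos)
{
	return *pos < (long)NR_TRACERS ? &tracers[*pos] : NULL;
}

/* next: advance the cursor and return the following element */
static const void *t_next(const void *v, long *pos)
{
	(*pos)++;
	return *pos < (long)NR_TRACERS ? &tracers[*pos] : NULL;
}

/* show: emit one element */
static int t_show(const void *v)
{
	printf("%s ", *(const char * const *)v);
	return 0;
}

int main(void)
{
	long pos = 0;
	const void *v;

	/* This loop plays the role of the seq_file core. */
	for (v = t_start(&pos); v; v = t_next(v, &pos))
		t_show(v);
	putchar('\n');
	return 0;
}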
|
|
|
|
|
|
|
|
static int show_traces_open(struct inode *inode, struct file *file)
|
|
|
|
{
|
2013-11-07 03:42:48 +00:00
|
|
|
struct trace_array *tr = inode->i_private;
|
|
|
|
struct seq_file *m;
|
|
|
|
int ret;
|
|
|
|
|
tracing: Add tracing_check_open_get_tr()
Currently, most files in the tracefs directory test if tracing_disabled is
set. If so, it should return -ENODEV. The tracing_disabled is called when
tracing is found to be broken. Originally it was done in case the ring
buffer was found to be corrupted, and we wanted to prevent reading it from
crashing the kernel. But it's also called if a tracing selftest fails on
boot. It's a one way switch. That is, once it is triggered, tracing is
disabled until reboot.
As most tracefs files can also be used by instances in the tracefs
directory, they need to be carefully done. Each instance has a trace_array
associated to it, and when the instance is removed, the trace_array is
freed. But if an instance is opened with a reference to the trace_array,
then it requires looking up the trace_array to get its ref counter (as there
could be a race with it being deleted and the open itself). Once it is
found, a reference is added to prevent the instance from being removed (and
the trace_array associated with it freed).
Combine the two checks (tracing_disabled and trace_array_get()) into a
single helper function. This will also make it easier to add lockdown to
tracefs later.
Link: http://lkml.kernel.org/r/20191011135458.7399da44@gandalf.local.home
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
2019-10-11 21:39:57 +00:00
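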
|
|
|
ret = tracing_check_open_get_tr(tr);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2019-10-11 22:19:17 +00:00
|
|
|
|
2013-11-07 03:42:48 +00:00
|
|
|
ret = seq_open(file, &show_traces_seq_ops);
|
2019-10-11 22:19:17 +00:00
|
|
|
if (ret) {
|
|
|
|
trace_array_put(tr);
|
2013-11-07 03:42:48 +00:00
|
|
|
return ret;
|
2019-10-11 22:19:17 +00:00
|
|
|
}
|
2013-11-07 03:42:48 +00:00
|
|
|
|
|
|
|
m = file->private_data;
|
|
|
|
m->private = tr;
|
|
|
|
|
|
|
|
return 0;
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
|
|
|
|
2019-10-11 22:19:17 +00:00
|
|
|
static int show_traces_release(struct inode *inode, struct file *file)
|
|
|
|
{
|
|
|
|
struct trace_array *tr = inode->i_private;
|
|
|
|
|
|
|
|
trace_array_put(tr);
|
|
|
|
return seq_release(inode, file);
|
|
|
|
}
|
|
|
|
|
2009-03-18 14:40:24 +00:00
|
|
|
static ssize_t
|
|
|
|
tracing_write_stub(struct file *filp, const char __user *ubuf,
|
|
|
|
size_t count, loff_t *ppos)
|
|
|
|
{
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
2013-12-21 22:39:40 +00:00
|
|
|
loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
|
2010-11-24 23:13:16 +00:00
|
|
|
{
|
2013-12-21 22:39:40 +00:00
|
|
|
int ret;
|
|
|
|
|
2010-11-24 23:13:16 +00:00
|
|
|
if (file->f_mode & FMODE_READ)
|
2013-12-21 22:39:40 +00:00
|
|
|
ret = seq_lseek(file, offset, whence);
|
2010-11-24 23:13:16 +00:00
|
|
|
else
|
2013-12-21 22:39:40 +00:00
|
|
|
file->f_pos = ret = 0;
|
|
|
|
|
|
|
|
return ret;
|
2010-11-24 23:13:16 +00:00
|
|
|
}
|
|
|
|
|
2009-03-06 02:44:55 +00:00
|
|
|
static const struct file_operations tracing_fops = {
|
2008-05-12 19:20:46 +00:00
|
|
|
.open = tracing_open,
|
|
|
|
.read = seq_read,
|
2009-03-18 14:40:24 +00:00
|
|
|
.write = tracing_write_stub,
|
2013-12-21 22:39:40 +00:00
|
|
|
.llseek = tracing_lseek,
|
2008-05-12 19:20:46 +00:00
|
|
|
.release = tracing_release,
|
2008-05-12 19:20:42 +00:00
|
|
|
};
|
|
|
|
|
2009-03-06 02:44:55 +00:00
|
|
|
static const struct file_operations show_traces_fops = {
|
2008-05-12 19:20:52 +00:00
|
|
|
.open = show_traces_open,
|
|
|
|
.read = seq_read,
|
2010-07-07 21:40:11 +00:00
|
|
|
.llseek = seq_lseek,
|
2019-10-11 22:19:17 +00:00
|
|
|
.release = show_traces_release,
|
2008-05-12 19:20:52 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
tracing_cpumask_read(struct file *filp, char __user *ubuf,
|
|
|
|
size_t count, loff_t *ppos)
|
|
|
|
{
|
2013-08-08 16:47:45 +00:00
|
|
|
struct trace_array *tr = file_inode(filp)->i_private;
|
2017-11-30 03:39:43 +00:00
|
|
|
char *mask_str;
|
2008-05-12 19:20:52 +00:00
|
|
|
int len;
|
2008-05-12 19:20:52 +00:00
|
|
|
|
2017-11-30 03:39:43 +00:00
|
|
|
len = snprintf(NULL, 0, "%*pb\n",
|
|
|
|
cpumask_pr_args(tr->tracing_cpumask)) + 1;
|
|
|
|
mask_str = kmalloc(len, GFP_KERNEL);
|
|
|
|
if (!mask_str)
|
|
|
|
return -ENOMEM;
|
2008-05-12 19:20:52 +00:00
|
|
|
|
2017-11-30 03:39:43 +00:00
|
|
|
len = snprintf(mask_str, len, "%*pb\n",
|
2015-02-13 22:37:39 +00:00
|
|
|
cpumask_pr_args(tr->tracing_cpumask));
|
|
|
|
if (len >= count) {
|
2008-05-12 19:20:52 +00:00
|
|
|
count = -EINVAL;
|
|
|
|
goto out_err;
|
|
|
|
}
|
2017-11-30 03:39:43 +00:00
|
|
|
count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
|
2008-05-12 19:20:52 +00:00
|
|
|
|
|
|
|
out_err:
|
2017-11-30 03:39:43 +00:00
|
|
|
kfree(mask_str);
|
2008-05-12 19:20:52 +00:00
|
|
|
|
|
|
|
return count;
|
|
|
|
}
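tracing_cpumask_read above sizes its buffer with the common two-pass snprintf idiom: a first call with a NULL buffer and size 0 only reports how many bytes would be needed, and the second call does the real formatting. A minimal standalone illustration:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int cpus = 8;
	/* Pass 1: snprintf(NULL, 0, ...) returns the length needed,
	 * excluding the terminating NUL, so add one for it. */
	int len = snprintf(NULL, 0, "cpumask for %d cpus\n", cpus) + 1;
	char *buf = malloc(len);

	if (!buf)
		return 1;
	/* Pass 2: format for real into the exactly-sized buffer. */
	snprintf(buf, len, "cpumask for %d cpus\n", cpus);
	fputs(buf, stdout);
	free(buf);
	return 0;
}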
|
|
|
|
|
2020-01-10 16:07:16 +00:00
|
|
|
int tracing_set_cpumask(struct trace_array *tr,
|
|
|
|
cpumask_var_t tracing_cpumask_new)
|
2008-05-12 19:20:52 +00:00
|
|
|
{
|
2020-01-10 16:07:16 +00:00
|
|
|
int cpu;
|
2008-05-12 19:20:52 +00:00
|
|
|
|
2020-01-10 16:07:16 +00:00
|
|
|
if (!tr)
|
|
|
|
return -EINVAL;
|
2008-05-12 19:20:52 +00:00
|
|
|
|
2008-12-02 20:34:05 +00:00
|
|
|
local_irq_disable();
|
2014-01-14 15:04:59 +00:00
|
|
|
arch_spin_lock(&tr->max_lock);
|
2008-05-12 19:21:00 +00:00
|
|
|
for_each_tracing_cpu(cpu) {
|
2008-05-12 19:20:52 +00:00
|
|
|
/*
|
|
|
|
* Increase/decrease the disabled counter if we are
|
|
|
|
* about to flip a bit in the cpumask:
|
|
|
|
*/
|
2013-08-08 16:47:45 +00:00
|
|
|
if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
|
2008-12-31 23:42:22 +00:00
|
|
|
!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
|
2020-01-09 23:53:48 +00:00
|
|
|
atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
|
|
|
|
ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
|
2008-05-12 19:20:52 +00:00
|
|
|
}
|
2013-08-08 16:47:45 +00:00
|
|
|
if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
|
2008-12-31 23:42:22 +00:00
|
|
|
cpumask_test_cpu(cpu, tracing_cpumask_new)) {
|
2020-01-09 23:53:48 +00:00
|
|
|
atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
|
|
|
|
ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
|
2008-05-12 19:20:52 +00:00
|
|
|
}
|
|
|
|
}
|
2014-01-14 15:04:59 +00:00
|
|
|
arch_spin_unlock(&tr->max_lock);
|
2008-12-02 20:34:05 +00:00
|
|
|
local_irq_enable();
|
2008-05-12 19:20:52 +00:00
|
|
|
|
2013-08-08 16:47:45 +00:00
|
|
|
cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
|
2020-01-10 16:07:16 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
|
|
|
|
size_t count, loff_t *ppos)
|
|
|
|
{
|
|
|
|
struct trace_array *tr = file_inode(filp)->i_private;
|
|
|
|
cpumask_var_t tracing_cpumask_new;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
|
|
|
|
if (err)
|
|
|
|
goto err_free;
|
|
|
|
|
|
|
|
err = tracing_set_cpumask(tr, tracing_cpumask_new);
|
|
|
|
if (err)
|
|
|
|
goto err_free;
|
|
|
|
|
2008-12-31 23:42:22 +00:00
|
|
|
free_cpumask_var(tracing_cpumask_new);
|
2008-05-12 19:20:52 +00:00
|
|
|
|
|
|
|
return count;
|
2008-05-12 19:20:52 +00:00
|
|
|
|
2020-01-10 16:07:16 +00:00
|
|
|
err_free:
|
2009-06-15 02:56:42 +00:00
|
|
|
free_cpumask_var(tracing_cpumask_new);
|
2008-05-12 19:20:52 +00:00
|
|
|
|
|
|
|
return err;
|
2008-05-12 19:20:52 +00:00
|
|
|
}
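The write side above parses a hexadecimal CPU bitmask (the same format the read side prints with %*pb). As a hedged user-space example, restricting tracing to CPUs 0 and 1 means writing the mask 0x3; the path and mask format are assumptions about a typical tracefs setup:

#include <stdio.h>

int main(void)
{
	/* "3" = binary 0b11 = trace only CPUs 0 and 1. */
	FILE *fp = fopen("/sys/kernel/tracing/tracing_cpumask", "w");

	if (!fp) {
		perror("tracing_cpumask");
		return 1;
	}
	fputs("3\n", fp);
	fclose(fp);
	return 0;
}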
|
|
|
|
|
2009-03-06 02:44:55 +00:00
|
|
|
static const struct file_operations tracing_cpumask_fops = {
|
2013-08-08 16:47:45 +00:00
|
|
|
.open = tracing_open_generic_tr,
|
2008-05-12 19:20:52 +00:00
|
|
|
.read = tracing_cpumask_read,
|
|
|
|
.write = tracing_cpumask_write,
|
2013-08-08 16:47:45 +00:00
|
|
|
.release = tracing_release_generic_tr,
|
2010-07-07 21:40:11 +00:00
|
|
|
.llseek = generic_file_llseek,
|
2008-05-12 19:20:42 +00:00
|
|
|
};
|
|
|
|
|
2009-12-08 03:15:59 +00:00
|
|
|
static int tracing_trace_options_show(struct seq_file *m, void *v)
|
2008-05-12 19:20:42 +00:00
|
|
|
{
|
2009-02-27 04:55:58 +00:00
|
|
|
struct tracer_opt *trace_opts;
|
2012-05-11 17:29:49 +00:00
|
|
|
struct trace_array *tr = m->private;
|
2009-02-27 04:55:58 +00:00
|
|
|
u32 tracer_flags;
|
|
|
|
int i;
|
2008-11-17 18:23:42 +00:00
|
|
|
|
2009-02-27 04:55:58 +00:00
|
|
|
mutex_lock(&trace_types_lock);
|
2012-05-11 17:29:49 +00:00
|
|
|
tracer_flags = tr->current_trace->flags->val;
|
|
|
|
trace_opts = tr->current_trace->flags->opts;
|
2009-02-27 04:55:58 +00:00
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
for (i = 0; trace_options[i]; i++) {
|
2015-09-30 13:42:05 +00:00
|
|
|
if (tr->trace_flags & (1 << i))
|
2009-12-08 03:15:59 +00:00
|
|
|
seq_printf(m, "%s\n", trace_options[i]);
|
2008-05-12 19:20:42 +00:00
|
|
|
else
|
2009-12-08 03:15:59 +00:00
|
|
|
seq_printf(m, "no%s\n", trace_options[i]);
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
|
|
|
|
2008-11-17 18:23:42 +00:00
|
|
|
for (i = 0; trace_opts[i].name; i++) {
|
|
|
|
if (tracer_flags & trace_opts[i].bit)
|
2009-12-08 03:15:59 +00:00
|
|
|
seq_printf(m, "%s\n", trace_opts[i].name);
|
2008-11-17 18:23:42 +00:00
|
|
|
else
|
2009-12-08 03:15:59 +00:00
|
|
|
seq_printf(m, "no%s\n", trace_opts[i].name);
|
2008-11-17 18:23:42 +00:00
|
|
|
}
|
2009-02-27 04:55:58 +00:00
|
|
|
mutex_unlock(&trace_types_lock);
|
2008-11-17 18:23:42 +00:00
|
|
|
|
2009-12-08 03:15:59 +00:00
|
|
|
return 0;
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
|
|
|
|
2014-01-10 16:13:54 +00:00
|
|
|
static int __set_tracer_option(struct trace_array *tr,
|
2009-12-08 03:17:06 +00:00
|
|
|
struct tracer_flags *tracer_flags,
|
|
|
|
struct tracer_opt *opts, int neg)
|
|
|
|
{
|
2016-03-08 13:37:01 +00:00
|
|
|
struct tracer *trace = tracer_flags->trace;
|
2009-12-08 03:17:06 +00:00
|
|
|
int ret;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2014-01-10 16:13:54 +00:00
|
|
|
ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
|
2009-12-08 03:17:06 +00:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
if (neg)
|
|
|
|
tracer_flags->val &= ~opts->bit;
|
|
|
|
else
|
|
|
|
tracer_flags->val |= opts->bit;
|
|
|
|
return 0;
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
|
|
|
|
2008-11-17 18:23:42 +00:00
|
|
|
/* Try to assign a tracer specific option */
|
2014-01-10 16:13:54 +00:00
|
|
|
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
|
2008-11-17 18:23:42 +00:00
|
|
|
{
|
2014-01-10 16:13:54 +00:00
|
|
|
struct tracer *trace = tr->current_trace;
|
2009-08-07 10:53:21 +00:00
|
|
|
struct tracer_flags *tracer_flags = trace->flags;
|
2008-11-17 18:23:42 +00:00
|
|
|
struct tracer_opt *opts = NULL;
|
2009-12-08 03:17:06 +00:00
|
|
|
int i;
|
2008-11-17 18:23:42 +00:00
|
|
|
|
2009-08-07 10:53:21 +00:00
|
|
|
for (i = 0; tracer_flags->opts[i].name; i++) {
|
|
|
|
opts = &tracer_flags->opts[i];
|
2008-11-17 18:23:42 +00:00
|
|
|
|
2009-12-08 03:17:06 +00:00
|
|
|
if (strcmp(cmp, opts->name) == 0)
|
2014-01-10 16:13:54 +00:00
|
|
|
return __set_tracer_option(tr, trace->flags, opts, neg);
|
2008-11-17 18:23:42 +00:00
|
|
|
}
|
|
|
|
|
2009-12-08 03:17:06 +00:00
|
|
|
return -EINVAL;
|
2008-11-17 18:23:42 +00:00
|
|
|
}
|
|
|
|
|
2013-03-14 19:03:53 +00:00
|
|
|
/* Some tracers require overwrite to stay enabled */
|
|
|
|
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
|
|
|
|
{
|
|
|
|
if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-05-11 17:29:49 +00:00
|
|
|
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
|
2009-03-17 22:09:55 +00:00
|
|
|
{
|
2019-12-10 09:15:16 +00:00
|
|
|
if ((mask == TRACE_ITER_RECORD_TGID) ||
|
|
|
|
(mask == TRACE_ITER_RECORD_CMD))
|
|
|
|
lockdep_assert_held(&event_mutex);
|
|
|
|
|
2009-03-17 22:09:55 +00:00
|
|
|
/* do nothing if flag is already set */
|
2015-09-30 13:42:05 +00:00
|
|
|
if (!!(tr->trace_flags & mask) == !!enabled)
|
2013-03-14 19:03:53 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* Give the tracer a chance to approve the change */
|
2012-05-11 17:29:49 +00:00
|
|
|
if (tr->current_trace->flag_changed)
|
2014-01-10 22:51:01 +00:00
|
|
|
if (tr->current_trace->flag_changed(tr, mask, !!enabled))
|
2013-03-14 19:03:53 +00:00
|
|
|
return -EINVAL;
|
2009-03-17 22:09:55 +00:00
|
|
|
|
|
|
|
if (enabled)
|
2015-09-30 13:42:05 +00:00
|
|
|
tr->trace_flags |= mask;
|
2009-03-17 22:09:55 +00:00
|
|
|
else
|
2015-09-30 13:42:05 +00:00
|
|
|
tr->trace_flags &= ~mask;
|
2010-07-02 03:07:32 +00:00
|
|
|
|
|
|
|
if (mask == TRACE_ITER_RECORD_CMD)
|
|
|
|
trace_event_enable_cmd_record(enabled);
|
2010-12-08 21:46:47 +00:00
|
|
|
|
2017-06-27 02:01:55 +00:00
|
|
|
if (mask == TRACE_ITER_RECORD_TGID) {
|
|
|
|
if (!tgid_map)
|
2019-10-24 03:34:30 +00:00
|
|
|
tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
|
treewide: kzalloc() -> kcalloc()
The kzalloc() function has a 2-factor argument form, kcalloc(). This
patch replaces cases of:
kzalloc(a * b, gfp)
with:
kcalloc(a * b, gfp)
as well as handling cases of:
kzalloc(a * b * c, gfp)
with:
kzalloc(array3_size(a, b, c), gfp)
as it's slightly less ugly than:
kzalloc_array(array_size(a, b), c, gfp)
This does, however, attempt to ignore constant size factors like:
kzalloc(4 * 1024, gfp)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@
(
kzalloc(
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
kzalloc(
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@
(
kzalloc(
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(char) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (COUNT_ID)
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * COUNT_ID
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (COUNT_CONST)
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * COUNT_CONST
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (COUNT_ID)
+ COUNT_ID, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * COUNT_ID
+ COUNT_ID, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (COUNT_CONST)
+ COUNT_CONST, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * COUNT_CONST
+ COUNT_CONST, sizeof(THING)
, ...)
)
// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@
- kzalloc
+ kcalloc
(
- SIZE * COUNT
+ COUNT, SIZE
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
kzalloc(
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
kzalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kzalloc(
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@
(
kzalloc(
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
kzalloc(C1 * C2 * C3, ...)
|
kzalloc(
- (E1) * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- (E1) * (E2) * E3
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- (E1) * (E2) * (E3)
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@
(
kzalloc(sizeof(THING) * C2, ...)
|
kzalloc(sizeof(TYPE) * C2, ...)
|
kzalloc(C1 * C2 * C3, ...)
|
kzalloc(C1 * C2, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (E2)
+ E2, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * E2
+ E2, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (E2)
+ E2, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * E2
+ E2, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- (E1) * E2
+ E1, E2
, ...)
|
- kzalloc
+ kcalloc
(
- (E1) * (E2)
+ E1, E2
, ...)
|
- kzalloc
+ kcalloc
(
- E1 * E2
+ E1, E2
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
2018-06-12 21:03:40 +00:00
|
|
|
sizeof(*tgid_map),
|
2017-06-27 02:01:55 +00:00
|
|
|
GFP_KERNEL);
|
|
|
|
if (!tgid_map) {
|
|
|
|
tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
trace_event_enable_tgid_record(enabled);
|
|
|
|
}
|
|
|
|
|
2016-04-13 20:59:18 +00:00
|
|
|
if (mask == TRACE_ITER_EVENT_FORK)
|
|
|
|
trace_event_follow_fork(tr, enabled);
|
|
|
|
|
2017-04-17 02:44:28 +00:00
|
|
|
if (mask == TRACE_ITER_FUNC_FORK)
|
|
|
|
ftrace_pid_follow_fork(tr, enabled);
|
|
|
|
|
2013-03-14 18:20:54 +00:00
|
|
|
if (mask == TRACE_ITER_OVERWRITE) {
|
2020-01-09 23:53:48 +00:00
|
|
|
ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
|
2013-03-14 18:20:54 +00:00
|
|
|
#ifdef CONFIG_TRACER_MAX_TRACE
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
|
2013-03-14 18:20:54 +00:00
|
|
|
#endif
|
|
|
|
}
|
2012-10-11 14:15:05 +00:00
|
|
|
|
2015-09-29 22:21:35 +00:00
|
|
|
if (mask == TRACE_ITER_PRINTK) {
|
2012-10-11 14:15:05 +00:00
|
|
|
trace_printk_start_stop_comm(enabled);
|
2015-09-29 22:21:35 +00:00
|
|
|
trace_printk_control(enabled);
|
|
|
|
}
|
2013-03-14 19:03:53 +00:00
|
|
|
|
|
|
|
return 0;
|
2009-03-17 22:09:55 +00:00
|
|
|
}
|
|
|
|
|
2020-01-10 16:06:17 +00:00
|
|
|
int trace_set_options(struct trace_array *tr, char *option)
|
2008-05-12 19:20:42 +00:00
|
|
|
{
|
2009-12-08 03:17:06 +00:00
|
|
|
char *cmp;
|
2008-05-12 19:20:42 +00:00
|
|
|
int neg = 0;
|
2018-05-17 08:36:03 +00:00
|
|
|
int ret;
|
2015-11-04 01:14:29 +00:00
|
|
|
size_t orig_len = strlen(option);
|
2018-12-22 04:10:26 +00:00
|
|
|
int len;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2012-11-02 02:56:07 +00:00
|
|
|
cmp = strstrip(option);
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2018-12-22 04:10:26 +00:00
|
|
|
len = str_has_prefix(cmp, "no");
|
|
|
|
if (len)
|
2008-05-12 19:20:42 +00:00
|
|
|
neg = 1;
|
2018-12-22 04:10:26 +00:00
|
|
|
|
|
|
|
cmp += len;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2019-12-10 09:15:16 +00:00
|
|
|
mutex_lock(&event_mutex);
|
2013-03-14 17:50:56 +00:00
|
|
|
mutex_lock(&trace_types_lock);
|
|
|
|
|
2018-05-17 08:36:03 +00:00
|
|
|
ret = match_string(trace_options, -1, cmp);
|
2008-11-17 18:23:42 +00:00
|
|
|
/* If no option could be set, test the specific tracer options */
|
2018-05-17 08:36:03 +00:00
|
|
|
if (ret < 0)
|
2014-01-10 16:13:54 +00:00
|
|
|
ret = set_tracer_option(tr, cmp, neg);
|
2018-05-17 08:36:03 +00:00
|
|
|
else
|
|
|
|
ret = set_tracer_flag(tr, 1 << ret, !neg);
|
2013-03-14 17:50:56 +00:00
|
|
|
|
|
|
|
mutex_unlock(&trace_types_lock);
|
2019-12-10 09:15:16 +00:00
|
|
|
mutex_unlock(&event_mutex);
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2015-11-04 01:14:29 +00:00
|
|
|
/*
|
|
|
|
* If the first trailing whitespace is replaced with '\0' by strstrip,
|
|
|
|
* turn it back into a space.
|
|
|
|
*/
|
|
|
|
if (orig_len > strlen(option))
|
|
|
|
option[strlen(option)] = ' ';
|
|
|
|
|
2012-11-02 02:56:07 +00:00
|
|
|
return ret;
|
|
|
|
}
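trace_set_options above strips an optional "no" prefix before dispatching on the option name, first against the global option table and then against tracer-specific options. A small stand-alone sketch of that "no"-prefix parsing (the helper here is a local stand-in for the kernel's str_has_prefix(), reimplemented for illustration):

#include <stdio.h>
#include <string.h>

/* Local stand-in: returns the prefix length when str starts with
 * prefix, 0 otherwise. */
static size_t has_prefix(const char *str, const char *prefix)
{
	size_t len = strlen(prefix);

	return strncmp(str, prefix, len) == 0 ? len : 0;
}

static void set_option(const char *name, int neg)
{
	printf("%s option '%s'\n", neg ? "clearing" : "setting", name);
}

int main(void)
{
	const char *opts[] = { "print-parent", "noprint-parent", "sym-offset" };
	size_t i;

	for (i = 0; i < sizeof(opts) / sizeof(opts[0]); i++) {
		const char *cmp = opts[i];
		size_t len = has_prefix(cmp, "no");

		set_option(cmp + len, len != 0);
	}
	return 0;
}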
|
|
|
|
|
2015-11-04 01:14:29 +00:00
|
|
|
static void __init apply_trace_boot_options(void)
|
|
|
|
{
|
|
|
|
char *buf = trace_boot_options_buf;
|
|
|
|
char *option;
|
|
|
|
|
|
|
|
while (true) {
|
|
|
|
option = strsep(&buf, ",");
|
|
|
|
|
|
|
|
if (!option)
|
|
|
|
break;
|
|
|
|
|
tracing: Put back comma for empty fields in boot string parsing
Both early_enable_events() and apply_trace_boot_options() parse a boot
string that may get parsed later on. They both use strsep() which converts a
comma into a nul character. To still allow the boot string to be parsed
again the same way, the nul character gets converted back to a comma after
the token is processed.
The problem is that these two functions check for an empty parameter (two
commas in a row ",,"), and continue the loop if the parameter is empty, but
fail to place the comma back. In this case, the second parsing will end at
this blank field and not process the fields after it.
In most cases, users should not have an empty field, but if it's going to be
checked, the code might as well be correct.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2015-11-04 03:15:14 +00:00
|
|
|
if (*option)
|
|
|
|
trace_set_options(&global_trace, option);
|
2015-11-04 01:14:29 +00:00
|
|
|
|
|
|
|
/* Put back the comma to allow this to be called again */
|
|
|
|
if (buf)
|
|
|
|
*(buf - 1) = ',';
|
|
|
|
}
|
|
|
|
}
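apply_trace_boot_options above relies on strsep() splitting on commas and then patches each consumed comma back, so the same boot string can be parsed again later. A runnable miniature of that pattern:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char options[] = "print-parent,,sym-offset";  /* note the empty field */
	char *buf = options;
	char *option;

	while ((option = strsep(&buf, ",")) != NULL) {
		if (*option)
			printf("option: %s\n", option);

		/* strsep() wrote a NUL over the comma; put it back so the
		 * string can be walked again with the same code. */
		if (buf)
			*(buf - 1) = ',';
	}

	/* The original string is intact again. */
	printf("restored: %s\n", options);
	return 0;
}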
|
|
|
|
|
2012-11-02 02:56:07 +00:00
|
|
|
static ssize_t
|
|
|
|
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
|
|
|
|
size_t cnt, loff_t *ppos)
|
|
|
|
{
|
2012-05-11 17:29:49 +00:00
|
|
|
struct seq_file *m = filp->private_data;
|
|
|
|
struct trace_array *tr = m->private;
|
2012-11-02 02:56:07 +00:00
|
|
|
char buf[64];
|
2013-03-14 19:03:53 +00:00
|
|
|
int ret;
|
2012-11-02 02:56:07 +00:00
|
|
|
|
|
|
|
if (cnt >= sizeof(buf))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2016-04-18 07:23:29 +00:00
|
|
|
if (copy_from_user(buf, ubuf, cnt))
|
2012-11-02 02:56:07 +00:00
|
|
|
return -EFAULT;
|
|
|
|
|
2013-01-10 01:54:17 +00:00
|
|
|
buf[cnt] = 0;
|
|
|
|
|
2012-05-11 17:29:49 +00:00
|
|
|
ret = trace_set_options(tr, buf);
|
2013-03-14 19:03:53 +00:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
2012-11-02 02:56:07 +00:00
|
|
|
|
2009-10-23 23:36:16 +00:00
|
|
|
*ppos += cnt;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
|
|
|
return cnt;
|
|
|
|
}
|
|
|
|
|
2009-12-08 03:15:59 +00:00
|
|
|
static int tracing_trace_options_open(struct inode *inode, struct file *file)
|
|
|
|
{
|
2013-07-02 03:34:22 +00:00
|
|
|
struct trace_array *tr = inode->i_private;
|
2013-07-18 18:18:44 +00:00
|
|
|
int ret;
|
2013-07-02 03:34:22 +00:00
|
|
|
|
tracing: Add tracing_check_open_get_tr()
2019-10-11 21:39:57 +00:00
|
|
|
ret = tracing_check_open_get_tr(tr);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2013-07-02 03:34:22 +00:00
|
|
|
|
2013-07-18 18:18:44 +00:00
|
|
|
ret = single_open(file, tracing_trace_options_show, inode->i_private);
|
|
|
|
if (ret < 0)
|
|
|
|
trace_array_put(tr);
|
|
|
|
|
|
|
|
return ret;
|
2009-12-08 03:15:59 +00:00
|
|
|
}
|
|
|
|
|
2009-03-06 02:44:55 +00:00
|
|
|
static const struct file_operations tracing_iter_fops = {
|
2009-12-08 03:15:59 +00:00
|
|
|
.open = tracing_trace_options_open,
|
|
|
|
.read = seq_read,
|
|
|
|
.llseek = seq_lseek,
|
2013-07-02 03:34:22 +00:00
|
|
|
.release = tracing_single_release_tr,
|
2008-11-12 22:52:37 +00:00
|
|
|
.write = tracing_trace_options_write,
|
2008-05-12 19:20:42 +00:00
|
|
|
};
|
|
|
|
|
2008-05-12 19:20:45 +00:00
|
|
|
static const char readme_msg[] =
|
|
|
|
"tracing mini-HOWTO:\n\n"
|
2013-03-15 21:23:20 +00:00
|
|
|
"# echo 0 > tracing_on : quick way to disable tracing\n"
|
|
|
|
"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
|
|
|
|
" Important files:\n"
|
|
|
|
" trace\t\t\t- The static contents of the buffer\n"
|
|
|
|
"\t\t\t To clear the buffer write into this file: echo > trace\n"
|
|
|
|
" trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
|
|
|
|
" current_tracer\t- function and latency tracers\n"
|
|
|
|
" available_tracers\t- list of configured tracers for current_tracer\n"
|
2019-03-31 23:48:25 +00:00
|
|
|
" error_log\t- error log for failed commands (that support it)\n"
|
2013-03-15 21:23:20 +00:00
|
|
|
" buffer_size_kb\t- view and modify size of per cpu buffer\n"
|
|
|
|
" buffer_total_size_kb - view total size of all cpu buffers\n\n"
|
|
|
|
" trace_clock\t\t-change the clock used to order events\n"
|
|
|
|
" local: Per cpu clock but may not be synced across CPUs\n"
|
|
|
|
" global: Synced across CPUs but slows tracing down.\n"
|
|
|
|
" counter: Not a clock, but just an increment\n"
|
|
|
|
" uptime: Jiffy counter from time of boot\n"
|
|
|
|
" perf: Same clock that perf events use\n"
|
|
|
|
#ifdef CONFIG_X86_64
|
|
|
|
" x86-tsc: TSC cycle counter\n"
|
|
|
|
#endif
|
2018-01-16 02:51:41 +00:00
|
|
|
"\n timestamp_mode\t-view the mode used to timestamp events\n"
|
|
|
|
" delta: Delta difference against a buffer-wide timestamp\n"
|
|
|
|
" absolute: Absolute (standalone) timestamp\n"
|
2013-03-15 21:23:20 +00:00
|
|
|
"\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
|
2016-07-06 19:25:08 +00:00
|
|
|
"\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
|
2013-03-15 21:23:20 +00:00
|
|
|
" tracing_cpumask\t- Limit which CPUs to trace\n"
|
|
|
|
" instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
|
|
|
|
"\t\t\t Remove sub-buffer with rmdir\n"
|
|
|
|
" trace_options\t\t- Set format or modify how tracing happens\n"
|
2019-01-29 01:55:53 +00:00
|
|
|
"\t\t\t Disable an option by prefixing 'no' to the\n"
|
2014-01-23 05:10:04 +00:00
|
|
|
"\t\t\t option name\n"
|
2014-06-05 01:24:27 +00:00
|
|
|
" saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
|
2013-03-15 21:23:20 +00:00
|
|
|
#ifdef CONFIG_DYNAMIC_FTRACE
|
|
|
|
"\n available_filter_functions - list of functions that can be filtered on\n"
|
2014-01-23 05:10:04 +00:00
|
|
|
" set_ftrace_filter\t- echo function name in here to only trace these\n"
|
|
|
|
"\t\t\t functions\n"
|
2016-10-05 11:58:15 +00:00
|
|
|
"\t accepts: func_full_name or glob-matching-pattern\n"
|
2014-01-23 05:10:04 +00:00
|
|
|
"\t modules: Can select a group via module\n"
|
|
|
|
"\t Format: :mod:<module-name>\n"
|
|
|
|
"\t example: echo :mod:ext3 > set_ftrace_filter\n"
|
|
|
|
"\t triggers: a command to perform when function is hit\n"
|
|
|
|
"\t Format: <function>:<trigger>[:count]\n"
|
|
|
|
"\t trigger: traceon, traceoff\n"
|
|
|
|
"\t\t enable_event:<system>:<event>\n"
|
|
|
|
"\t\t disable_event:<system>:<event>\n"
|
2013-03-15 21:23:20 +00:00
|
|
|
#ifdef CONFIG_STACKTRACE
|
2014-01-23 05:10:04 +00:00
|
|
|
"\t\t stacktrace\n"
|
2013-03-15 21:23:20 +00:00
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_TRACER_SNAPSHOT
|
2014-01-23 05:10:04 +00:00
|
|
|
"\t\t snapshot\n"
|
2013-03-15 21:23:20 +00:00
|
|
|
#endif
|
2014-04-11 02:43:37 +00:00
|
|
|
"\t\t dump\n"
|
|
|
|
"\t\t cpudump\n"
|
2014-01-23 05:10:04 +00:00
|
|
|
"\t example: echo do_fault:traceoff > set_ftrace_filter\n"
|
|
|
|
"\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
|
|
|
|
"\t The first one will disable tracing every time do_fault is hit\n"
|
|
|
|
"\t The second will disable tracing at most 3 times when do_trap is hit\n"
|
|
|
|
"\t The first time do trap is hit and it disables tracing, the\n"
|
|
|
|
"\t counter will decrement to 2. If tracing is already disabled,\n"
|
|
|
|
"\t the counter will not decrement. It only decrements when the\n"
|
|
|
|
"\t trigger did work\n"
|
|
|
|
"\t To remove trigger without count:\n"
|
|
|
|
"\t echo '!<function>:<trigger> > set_ftrace_filter\n"
|
|
|
|
"\t To remove trigger with a count:\n"
|
|
|
|
"\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
|
2013-03-15 21:23:20 +00:00
|
|
|
" set_ftrace_notrace\t- echo function name in here to never trace.\n"
|
2014-01-23 05:10:04 +00:00
|
|
|
"\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
|
|
|
|
"\t modules: Can select a group via module command :mod:\n"
|
|
|
|
"\t Does not accept triggers\n"
|
2013-03-15 21:23:20 +00:00
|
|
|
#endif /* CONFIG_DYNAMIC_FTRACE */
|
|
|
|
#ifdef CONFIG_FUNCTION_TRACER
|
2014-01-23 05:10:04 +00:00
|
|
|
" set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
|
|
|
|
"\t\t (function)\n"
|
2020-03-20 03:19:06 +00:00
|
|
|
" set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
|
|
|
|
"\t\t (function)\n"
|
2013-03-15 21:23:20 +00:00
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
|
|
|
" set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
|
2014-06-12 16:23:53 +00:00
|
|
|
" set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
|
2013-03-15 21:23:20 +00:00
|
|
|
" max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
|
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_TRACER_SNAPSHOT
|
2014-01-23 05:10:04 +00:00
|
|
|
"\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
|
|
|
|
"\t\t\t snapshot buffer. Read the contents for more\n"
|
|
|
|
"\t\t\t information\n"
|
2013-03-15 21:23:20 +00:00
|
|
|
#endif
|
2013-07-15 08:32:34 +00:00
|
|
|
#ifdef CONFIG_STACK_TRACER
|
2013-03-15 21:23:20 +00:00
|
|
|
" stack_trace\t\t- Shows the max stack trace when active\n"
|
|
|
|
" stack_max_size\t- Shows current max stack size that was traced\n"
|
2014-01-23 05:10:04 +00:00
|
|
|
"\t\t\t Write into this file to reset the max size (trigger a\n"
|
|
|
|
"\t\t\t new trace)\n"
|
2013-03-15 21:23:20 +00:00
|
|
|
#ifdef CONFIG_DYNAMIC_FTRACE
|
2014-01-23 05:10:04 +00:00
|
|
|
" stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
|
|
|
|
"\t\t\t traces\n"
|
2013-03-15 21:23:20 +00:00
|
|
|
#endif
|
2013-07-15 08:32:34 +00:00
|
|
|
#endif /* CONFIG_STACK_TRACER */
|
2018-11-05 09:02:08 +00:00
|
|
|
#ifdef CONFIG_DYNAMIC_EVENTS
|
2019-06-19 15:07:49 +00:00
|
|
|
" dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
|
2018-11-05 09:02:08 +00:00
|
|
|
"\t\t\t Write into this file to define/undefine new trace events.\n"
|
|
|
|
#endif
|
2017-02-16 06:00:50 +00:00
|
|
|
#ifdef CONFIG_KPROBE_EVENTS
|
2019-06-19 15:07:49 +00:00
|
|
|
" kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
|
2016-08-18 08:58:15 +00:00
|
|
|
"\t\t\t Write into this file to define/undefine new trace events.\n"
|
|
|
|
#endif
|
2017-02-16 06:00:50 +00:00
|
|
|
#ifdef CONFIG_UPROBE_EVENTS
|
2019-06-19 15:07:58 +00:00
|
|
|
" uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
|
2016-08-18 08:58:15 +00:00
|
|
|
"\t\t\t Write into this file to define/undefine new trace events.\n"
|
|
|
|
#endif
|
2017-02-16 06:00:50 +00:00
|
|
|
#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
|
2016-08-18 08:58:15 +00:00
|
|
|
"\t accepts: event-definitions (one definition per line)\n"
|
2017-05-23 06:05:50 +00:00
|
|
|
"\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
|
|
|
|
"\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
|
2018-11-05 09:03:33 +00:00
|
|
|
#ifdef CONFIG_HIST_TRIGGERS
|
|
|
|
"\t s:[synthetic/]<event> <field> [<field>]\n"
|
|
|
|
#endif
|
2016-08-18 08:58:15 +00:00
|
|
|
"\t -:[<group>/]<event>\n"
|
2017-02-16 06:00:50 +00:00
|
|
|
#ifdef CONFIG_KPROBE_EVENTS
|
2016-08-18 08:58:15 +00:00
|
|
|
"\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
|
2017-02-22 13:53:39 +00:00
|
|
|
"place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
|
2016-08-18 08:58:15 +00:00
|
|
|
#endif
|
2017-02-16 06:00:50 +00:00
|
|
|
#ifdef CONFIG_UPROBE_EVENTS
|
uprobes: Support SDT markers having reference count (semaphore)
Userspace Statically Defined Tracepoints[1] are dtrace style markers
inside userspace applications. Applications like PostgreSQL, MySQL,
Pthread, Perl, Python, Java, Ruby, Node.js, libvirt, QEMU, glib etc
have these markers embedded in them. These markers are added by developers
at important places in the code. Each marker source expands to a single
nop instruction in the compiled code, but there may be additional
overhead for computing the marker arguments, which expands to a couple of
instructions. When that overhead matters, the marker code can be skipped
at runtime with an if() condition when no one is tracing the marker:
if (reference_counter > 0) {
Execute marker instructions;
}
The default value of the reference counter is 0. A tracer has to increment the
reference counter before tracing on a marker and decrement it when
done with the tracing.
Implement the reference counter logic in the core uprobe code. Users will be
able to use it from trace_uprobe as well as from kernel modules. A new
trace_uprobe definition with a reference counter will now be:
<path>:<offset>[(ref_ctr_offset)]
where ref_ctr_offset is an optional field. For kernel modules, a new
variant of uprobe_register() has been introduced:
uprobe_register_refctr(inode, offset, ref_ctr_offset, consumer)
There is no new variant of uprobe_unregister() because one uprobe is
assumed to have only one reference counter.
[1] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
Note: the 'reference counter' is called a 'semaphore' in the original Dtrace
(and Systemtap, bcc, and even ELF) documentation and code. But the
term 'semaphore' is misleading in this context. It is just a counter
used to hold the number of tracers tracing a marker, and is not really
used for any synchronization. So we call it a 'reference counter'
in kernel / perf code.
Link: http://lkml.kernel.org/r/20180820044250.11659-2-ravi.bangoria@linux.ibm.com
Reviewed-by: Masami Hiramatsu <mhiramat@kernel.org>
[Only trace_uprobe.c]
Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Song Liu <songliubraving@fb.com>
Tested-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
2018-08-20 04:42:47 +00:00
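/*
 * Illustrative only: combining the p[:[<group>/]<event>] format shown
 * above with the <path>:<offset>[(ref_ctr_offset)] place described in
 * the commit message, a reference-counted (SDT style) uprobe definition
 * written to uprobe_events could look like
 *
 *   p:sdt_libfoo/marker /usr/lib/libfoo.so:0x4d2(0x10036)
 *
 * where the group/event names, path, and both offsets are hypothetical
 * values, not taken from this file.
 */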
|
|
|
" place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
|
2016-08-18 08:58:15 +00:00
|
|
|
#endif
|
|
|
|
"\t args: <name>=fetcharg[:type]\n"
|
|
|
|
"\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
|
2018-04-25 12:21:26 +00:00
|
|
|
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
|
2019-05-15 05:38:42 +00:00
|
|
|
"\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
|
2018-04-25 12:21:26 +00:00
|
|
|
#else
|
2019-05-15 05:38:42 +00:00
|
|
|
"\t $stack<index>, $stack, $retval, $comm,\n"
|
2018-04-25 12:21:26 +00:00
|
|
|
#endif
|
2019-06-19 15:08:37 +00:00
|
|
|
"\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
|
2018-04-25 12:20:28 +00:00
|
|
|
"\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
|
2019-05-15 05:38:30 +00:00
|
|
|
"\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
|
2018-04-25 12:21:55 +00:00
|
|
|
"\t <type>\\[<array-size>\\]\n"
|
2018-11-05 09:03:33 +00:00
|
|
|
#ifdef CONFIG_HIST_TRIGGERS
|
|
|
|
"\t field: <stype> <name>;\n"
|
|
|
|
"\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
|
|
|
|
"\t [unsigned] char/int/long\n"
|
|
|
|
#endif
|
2016-08-18 08:58:15 +00:00
|
|
|
#endif
|
2014-01-17 21:11:44 +00:00
|
|
|
" events/\t\t- Directory containing all trace event subsystems:\n"
|
|
|
|
" enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
|
|
|
|
" events/<system>/\t- Directory containing all trace events for <system>:\n"
|
2014-01-23 05:10:04 +00:00
|
|
|
" enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
|
|
|
|
"\t\t\t events\n"
|
2014-01-17 21:11:44 +00:00
|
|
|
" filter\t\t- If set, only events passing filter are traced\n"
|
2014-01-23 05:10:04 +00:00
|
|
|
" events/<system>/<event>/\t- Directory containing control files for\n"
|
|
|
|
"\t\t\t <event>:\n"
|
2014-01-17 21:11:44 +00:00
|
|
|
" enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
|
|
|
|
" filter\t\t- If set, only events passing filter are traced\n"
|
|
|
|
" trigger\t\t- If set, a command to perform when event is hit\n"
|
2014-01-23 05:10:04 +00:00
|
|
|
"\t Format: <trigger>[:count][if <filter>]\n"
|
|
|
|
"\t trigger: traceon, traceoff\n"
|
|
|
|
"\t enable_event:<system>:<event>\n"
|
|
|
|
"\t disable_event:<system>:<event>\n"
|
2016-03-03 18:54:55 +00:00
|
|
|
#ifdef CONFIG_HIST_TRIGGERS
|
|
|
|
"\t enable_hist:<system>:<event>\n"
|
|
|
|
"\t disable_hist:<system>:<event>\n"
|
|
|
|
#endif
|
2014-01-17 21:11:44 +00:00
|
|
|
#ifdef CONFIG_STACKTRACE
|
2014-01-23 05:10:04 +00:00
|
|
|
"\t\t stacktrace\n"
|
2014-01-17 21:11:44 +00:00
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_TRACER_SNAPSHOT
|
2014-01-23 05:10:04 +00:00
|
|
|
"\t\t snapshot\n"
|
tracing: Add 'hist' event trigger command
'hist' triggers allow users to continually aggregate trace events,
which can then be viewed afterwards by simply reading a 'hist' file
containing the aggregation in a human-readable format.
The basic idea is very simple and boils down to a mechanism whereby
trace events, rather than being exhaustively dumped in raw form and
viewed directly, are automatically 'compressed' into meaningful tables
completely defined by the user.
This is done strictly via single-line command-line commands and
without the aid of any kind of programming language or interpreter.
A surprising number of typical use cases can be accomplished by users
via this simple mechanism. In fact, a large number of the tasks that
users typically do using the more complicated script-based tracing
tools, at least during the initial stages of an investigation, can be
accomplished by simply specifying a set of keys and values to be used
in the creation of a hash table.
The Linux kernel trace event subsystem happens to provide an extensive
list of keys and values ready-made for such a purpose in the form of
the event format files associated with each trace event. By simply
consulting the format file for field names of interest and by plugging
them into the hist trigger command, users can create an endless number
of useful aggregations to help with investigating various properties
of the system. See Documentation/trace/events.txt for examples.
hist triggers are implemented on top of the existing event trigger
infrastructure, and as such are consistent with the existing triggers
from a user's perspective as well.
The basic syntax follows the existing trigger syntax. Users start an
aggregation by writing a 'hist' trigger to the event of interest's
trigger file:
# echo hist:keys=xxx [ if filter] > event/trigger
Once a hist trigger has been set up, by default it continually
aggregates every matching event into a hash table using the event key
and a value field named 'hitcount'.
To view the aggregation at any point in time, simply read the 'hist'
file in the same directory as the 'trigger' file:
# cat event/hist
The detailed syntax provides additional options for user control, and
is described exhaustively in Documentation/trace/events.txt and in the
virtual tracing/README file in the tracing subsystem.
Link: http://lkml.kernel.org/r/72d263b5e1853fe9c314953b65833c3aa75479f2.1457029949.git.tom.zanussi@linux.intel.com
Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Tested-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Reviewed-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2016-03-03 18:54:42 +00:00
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_HIST_TRIGGERS
|
|
|
|
"\t\t hist (see below)\n"
|
2014-01-17 21:11:44 +00:00
|
|
|
#endif
|
2014-01-23 05:10:04 +00:00
|
|
|
"\t example: echo traceoff > events/block/block_unplug/trigger\n"
|
|
|
|
"\t echo traceoff:3 > events/block/block_unplug/trigger\n"
|
|
|
|
"\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
|
|
|
|
"\t events/block/block_unplug/trigger\n"
|
|
|
|
"\t The first disables tracing every time block_unplug is hit.\n"
|
|
|
|
"\t The second disables tracing the first 3 times block_unplug is hit.\n"
|
|
|
|
"\t The third enables the kmalloc event the first 3 times block_unplug\n"
|
|
|
|
"\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
|
|
|
|
"\t Like function triggers, the counter is only decremented if it\n"
|
|
|
|
"\t enabled or disabled tracing.\n"
|
|
|
|
"\t To remove a trigger without a count:\n"
|
|
|
|
"\t echo '!<trigger> > <system>/<event>/trigger\n"
|
|
|
|
"\t To remove a trigger with a count:\n"
|
|
|
|
"\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
|
|
|
|
"\t Filters can be ignored when removing a trigger.\n"
|
2016-03-03 18:54:42 +00:00
|
|
|
#ifdef CONFIG_HIST_TRIGGERS
|
|
|
|
" hist trigger\t- If set, event hits are aggregated into a hash table\n"
|
2016-03-03 18:54:44 +00:00
|
|
|
"\t Format: hist:keys=<field1[,field2,...]>\n"
|
2016-03-03 18:54:43 +00:00
|
|
|
"\t [:values=<field1[,field2,...]>]\n"
|
2016-03-03 18:54:45 +00:00
|
|
|
"\t [:sort=<field1[,field2,...]>]\n"
|
2016-03-03 18:54:42 +00:00
|
|
|
"\t [:size=#entries]\n"
|
2016-03-03 18:54:47 +00:00
|
|
|
"\t [:pause][:continue][:clear]\n"
|
2016-03-03 18:54:59 +00:00
|
|
|
"\t [:name=histname1]\n"
|
2019-02-13 23:42:43 +00:00
|
|
|
"\t [:<handler>.<action>]\n"
|
2016-03-03 18:54:42 +00:00
|
|
|
"\t [if <filter>]\n\n"
|
|
|
|
"\t When a matching event is hit, an entry is added to a hash\n"
|
2016-03-03 18:54:43 +00:00
|
|
|
"\t table using the key(s) and value(s) named, and the value of a\n"
|
|
|
|
"\t sum called 'hitcount' is incremented. Keys and values\n"
|
|
|
|
"\t correspond to fields in the event's format description. Keys\n"
|
2016-03-03 18:54:52 +00:00
|
|
|
"\t can be any field, or the special string 'stacktrace'.\n"
|
|
|
|
"\t Compound keys consisting of up to two fields can be specified\n"
|
|
|
|
"\t by the 'keys' keyword. Values must correspond to numeric\n"
|
|
|
|
"\t fields. Sort keys consisting of up to two fields can be\n"
|
|
|
|
"\t specified using the 'sort' keyword. The sort direction can\n"
|
|
|
|
"\t be modified by appending '.descending' or '.ascending' to a\n"
|
|
|
|
"\t sort field. The 'size' parameter can be used to specify more\n"
|
2016-03-03 18:54:59 +00:00
|
|
|
"\t or fewer than the default 2048 entries for the hashtable size.\n"
|
|
|
|
"\t If a hist trigger is given a name using the 'name' parameter,\n"
|
|
|
|
"\t its histogram data will be shared with other triggers of the\n"
|
|
|
|
"\t same name, and trigger hits will update this common data.\n\n"
|
2016-03-03 18:54:42 +00:00
|
|
|
"\t Reading the 'hist' file for the event will dump the hash\n"
|
2016-03-03 18:54:57 +00:00
|
|
|
"\t table in its entirety to stdout. If there are multiple hist\n"
|
|
|
|
"\t triggers attached to an event, there will be a table for each\n"
|
2016-03-03 18:54:59 +00:00
|
|
|
"\t trigger in the output. The table displayed for a named\n"
|
|
|
|
"\t trigger will be the same as any other instance having the\n"
|
|
|
|
"\t same name. The default format used to display a given field\n"
|
|
|
|
"\t can be modified by appending any of the following modifiers\n"
|
|
|
|
"\t to the field name, as applicable:\n\n"
|
2016-03-03 18:54:49 +00:00
|
|
|
"\t .hex display a number as a hex value\n"
|
|
|
|
"\t .sym display an address as a symbol\n"
|
2016-03-03 18:54:50 +00:00
|
|
|
"\t .sym-offset display an address as a symbol and offset\n"
|
2016-03-03 18:54:51 +00:00
|
|
|
"\t .execname display a common_pid as a program name\n"
|
2018-01-16 02:51:48 +00:00
|
|
|
"\t .syscall display a syscall id as a syscall name\n"
|
|
|
|
"\t .log2 display log2 value rather than raw number\n"
|
|
|
|
"\t .usecs display a common_timestamp in microseconds\n\n"
|
2016-03-03 18:54:46 +00:00
|
|
|
"\t The 'pause' parameter can be used to pause an existing hist\n"
|
|
|
|
"\t trigger or to start a hist trigger but not log any events\n"
|
|
|
|
"\t until told to do so. 'continue' can be used to start or\n"
|
|
|
|
"\t restart a paused hist trigger.\n\n"
|
2016-03-03 18:54:47 +00:00
|
|
|
"\t The 'clear' parameter will clear the contents of a running\n"
|
|
|
|
"\t hist trigger and leave its current paused/active state\n"
|
|
|
|
"\t unchanged.\n\n"
|
2016-03-03 18:54:55 +00:00
|
|
|
"\t The enable_hist and disable_hist triggers can be used to\n"
|
|
|
|
"\t have one event conditionally start and stop another event's\n"
|
2019-02-17 22:32:22 +00:00
|
|
|
"\t already-attached hist trigger. The syntax is analogous to\n"
|
2019-02-13 23:42:43 +00:00
|
|
|
"\t the enable_event and disable_event triggers.\n\n"
|
|
|
|
"\t Hist trigger handlers and actions are executed whenever a\n"
|
|
|
|
"\t a histogram entry is added or updated. They take the form:\n\n"
|
|
|
|
"\t <handler>.<action>\n\n"
|
|
|
|
"\t The available handlers are:\n\n"
|
|
|
|
"\t onmatch(matching.event) - invoke on addition or update\n"
|
2019-02-13 23:42:48 +00:00
|
|
|
"\t onmax(var) - invoke if var exceeds current max\n"
|
|
|
|
"\t onchange(var) - invoke action if var changes\n\n"
|
2019-02-13 23:42:43 +00:00
|
|
|
"\t The available actions are:\n\n"
|
2019-02-13 23:42:50 +00:00
|
|
|
"\t trace(<synthetic_event>,param list) - generate synthetic event\n"
|
2019-02-13 23:42:43 +00:00
|
|
|
"\t save(field,...) - save current event fields\n"
|
2019-02-13 23:42:46 +00:00
|
|
|
#ifdef CONFIG_TRACER_SNAPSHOT
|
|
|
|
"\t snapshot() - snapshot the trace buffer\n"
|
|
|
|
#endif
|
2016-03-03 18:54:42 +00:00
|
|
|
#endif
|
2008-05-12 19:20:45 +00:00
|
|
|
;
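To tie the hist trigger syntax documented above together, a hypothetical end-to-end sequence (the event name and field names are illustrative, not taken from this file) might be:

/*
 *   # echo 'hist:keys=common_pid.execname:values=hitcount:sort=hitcount.descending' \
 *         > events/sched/sched_switch/trigger
 *   # cat events/sched/sched_switch/hist
 *   # echo '!hist:keys=common_pid.execname' > events/sched/sched_switch/trigger
 */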
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
tracing_readme_read(struct file *filp, char __user *ubuf,
|
|
|
|
size_t cnt, loff_t *ppos)
|
|
|
|
{
|
|
|
|
return simple_read_from_buffer(ubuf, cnt, ppos,
|
|
|
|
readme_msg, strlen(readme_msg));
|
|
|
|
}
|
|
|
|
|
2009-03-06 02:44:55 +00:00
|
|
|
static const struct file_operations tracing_readme_fops = {
|
2008-05-12 19:20:52 +00:00
|
|
|
.open = tracing_open_generic,
|
|
|
|
.read = tracing_readme_read,
|
2010-07-07 21:40:11 +00:00
|
|
|
.llseek = generic_file_llseek,
|
2008-05-12 19:20:45 +00:00
|
|
|
};
|
|
|
|
|
2017-07-06 04:07:15 +00:00
|
|
|
static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
|
|
|
|
{
|
|
|
|
int *ptr = v;
|
|
|
|
|
|
|
|
if (*pos || m->count)
|
|
|
|
ptr++;
|
|
|
|
|
|
|
|
(*pos)++;
|
|
|
|
|
|
|
|
for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
|
|
|
|
if (trace_find_tgid(*ptr))
|
|
|
|
return ptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
|
|
|
|
{
|
|
|
|
void *v;
|
|
|
|
loff_t l = 0;
|
|
|
|
|
|
|
|
if (!tgid_map)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
v = &tgid_map[0];
|
|
|
|
while (l <= *pos) {
|
|
|
|
v = saved_tgids_next(m, v, &l);
|
|
|
|
if (!v)
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return v;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void saved_tgids_stop(struct seq_file *m, void *v)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
static int saved_tgids_show(struct seq_file *m, void *v)
|
|
|
|
{
|
|
|
|
int pid = (int *)v - tgid_map;
|
|
|
|
|
|
|
|
seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct seq_operations tracing_saved_tgids_seq_ops = {
|
|
|
|
.start = saved_tgids_start,
|
|
|
|
.stop = saved_tgids_stop,
|
|
|
|
.next = saved_tgids_next,
|
|
|
|
.show = saved_tgids_show,
|
|
|
|
};
|
|
|
|
|
|
|
|
static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
|
|
|
|
{
|
2019-10-11 21:39:57 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = tracing_check_open_get_tr(NULL);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2017-07-06 04:07:15 +00:00
|
|
|
|
|
|
|
return seq_open(filp, &tracing_saved_tgids_seq_ops);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static const struct file_operations tracing_saved_tgids_fops = {
|
|
|
|
.open = tracing_saved_tgids_open,
|
|
|
|
.read = seq_read,
|
|
|
|
.llseek = seq_lseek,
|
|
|
|
.release = seq_release,
|
|
|
|
};
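A minimal sketch of how a read-only file like this is typically wired into tracefs, modeled on the trace_create_file() call used for eval_map later in this listing; the function name and call site here are illustrative, not the actual registration code in this file:

/* Illustrative registration sketch, assuming a tracefs parent dentry
 * (d_tracer) as used elsewhere in this file. */
static void example_create_saved_tgids_file(struct dentry *d_tracer)
{
	trace_create_file("saved_tgids", 0444, d_tracer,
			  NULL, &tracing_saved_tgids_fops);
}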
|
|
|
|
|
2014-02-20 08:44:31 +00:00
|
|
|
static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
|
|
|
|
{
|
|
|
|
unsigned int *ptr = v;
|
2009-04-10 20:04:48 +00:00
|
|
|
|
2014-02-20 08:44:31 +00:00
|
|
|
if (*pos || m->count)
|
|
|
|
ptr++;
|
2009-04-10 20:04:48 +00:00
|
|
|
|
2014-02-20 08:44:31 +00:00
|
|
|
(*pos)++;
|
2009-04-10 20:04:48 +00:00
|
|
|
|
2014-06-05 01:24:27 +00:00
|
|
|
for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
|
|
|
|
ptr++) {
|
2014-02-20 08:44:31 +00:00
|
|
|
if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
|
|
|
|
continue;
|
2009-04-10 20:04:48 +00:00
|
|
|
|
2014-02-20 08:44:31 +00:00
|
|
|
return ptr;
|
|
|
|
}
|
2009-04-10 20:04:48 +00:00
|
|
|
|
2014-02-20 08:44:31 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
|
|
|
|
{
|
|
|
|
void *v;
|
|
|
|
loff_t l = 0;
|
2009-04-10 20:04:48 +00:00
|
|
|
|
2014-05-30 14:49:46 +00:00
|
|
|
preempt_disable();
|
|
|
|
arch_spin_lock(&trace_cmdline_lock);
|
|
|
|
|
2014-06-05 01:24:27 +00:00
|
|
|
v = &savedcmd->map_cmdline_to_pid[0];
|
2014-02-20 08:44:31 +00:00
|
|
|
while (l <= *pos) {
|
|
|
|
v = saved_cmdlines_next(m, v, &l);
|
|
|
|
if (!v)
|
|
|
|
return NULL;
|
2009-04-10 20:04:48 +00:00
|
|
|
}
|
|
|
|
|
2014-02-20 08:44:31 +00:00
|
|
|
return v;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void saved_cmdlines_stop(struct seq_file *m, void *v)
|
|
|
|
{
|
2014-05-30 14:49:46 +00:00
|
|
|
arch_spin_unlock(&trace_cmdline_lock);
|
|
|
|
preempt_enable();
|
2014-02-20 08:44:31 +00:00
|
|
|
}
|
2009-04-10 20:04:48 +00:00
|
|
|
|
2014-02-20 08:44:31 +00:00
|
|
|
static int saved_cmdlines_show(struct seq_file *m, void *v)
|
|
|
|
{
|
|
|
|
char buf[TASK_COMM_LEN];
|
|
|
|
unsigned int *pid = v;
|
2009-04-10 20:04:48 +00:00
|
|
|
|
2014-05-30 14:49:46 +00:00
|
|
|
__trace_find_cmdline(*pid, buf);
|
2014-02-20 08:44:31 +00:00
|
|
|
seq_printf(m, "%d %s\n", *pid, buf);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
|
|
|
|
.start = saved_cmdlines_start,
|
|
|
|
.next = saved_cmdlines_next,
|
|
|
|
.stop = saved_cmdlines_stop,
|
|
|
|
.show = saved_cmdlines_show,
|
|
|
|
};
|
|
|
|
|
|
|
|
static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
|
|
|
|
{
|
2019-10-11 21:39:57 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = tracing_check_open_get_tr(NULL);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2014-02-20 08:44:31 +00:00
|
|
|
|
|
|
|
return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
|
2009-04-10 20:04:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static const struct file_operations tracing_saved_cmdlines_fops = {
|
2014-02-20 08:44:31 +00:00
|
|
|
.open = tracing_saved_cmdlines_open,
|
|
|
|
.read = seq_read,
|
|
|
|
.llseek = seq_lseek,
|
|
|
|
.release = seq_release,
|
2009-04-10 20:04:48 +00:00
|
|
|
};
|
|
|
|
|
2014-06-05 01:24:27 +00:00
|
|
|
static ssize_t
|
|
|
|
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
|
|
|
|
size_t cnt, loff_t *ppos)
|
|
|
|
{
|
|
|
|
char buf[64];
|
|
|
|
int r;
|
|
|
|
|
|
|
|
arch_spin_lock(&trace_cmdline_lock);
|
2014-06-10 07:11:35 +00:00
|
|
|
r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
|
2014-06-05 01:24:27 +00:00
|
|
|
arch_spin_unlock(&trace_cmdline_lock);
|
|
|
|
|
|
|
|
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
|
|
|
|
{
|
|
|
|
kfree(s->saved_cmdlines);
|
|
|
|
kfree(s->map_cmdline_to_pid);
|
|
|
|
kfree(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int tracing_resize_saved_cmdlines(unsigned int val)
|
|
|
|
{
|
|
|
|
struct saved_cmdlines_buffer *s, *savedcmd_temp;
|
|
|
|
|
2014-06-10 07:11:35 +00:00
|
|
|
s = kmalloc(sizeof(*s), GFP_KERNEL);
|
2014-06-05 01:24:27 +00:00
|
|
|
if (!s)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
if (allocate_cmdlines_buffer(val, s) < 0) {
|
|
|
|
kfree(s);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
arch_spin_lock(&trace_cmdline_lock);
|
|
|
|
savedcmd_temp = savedcmd;
|
|
|
|
savedcmd = s;
|
|
|
|
arch_spin_unlock(&trace_cmdline_lock);
|
|
|
|
free_saved_cmdlines_buffer(savedcmd_temp);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
|
|
|
|
size_t cnt, loff_t *ppos)
|
|
|
|
{
|
|
|
|
unsigned long val;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
|
|
|
|
if (!val || val > PID_MAX_DEFAULT)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
ret = tracing_resize_saved_cmdlines((unsigned int)val);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
*ppos += cnt;
|
|
|
|
|
|
|
|
return cnt;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct file_operations tracing_saved_cmdlines_size_fops = {
|
|
|
|
.open = tracing_open_generic,
|
|
|
|
.read = tracing_saved_cmdlines_size_read,
|
|
|
|
.write = tracing_saved_cmdlines_size_write,
|
|
|
|
};
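For reference, an illustrative use of the saved_cmdlines_size file defined above; the value written is only an example and must fall in the range checked by the write handler (1 to PID_MAX_DEFAULT):

/*
 *   # cat saved_cmdlines_size      <- prints the current number of entries
 *   # echo 1024 > saved_cmdlines_size
 */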
|
|
|
|
|
2017-05-31 21:56:53 +00:00
|
|
|
#ifdef CONFIG_TRACE_EVAL_MAP_FILE
|
2017-05-31 21:56:45 +00:00
|
|
|
static union trace_eval_map_item *
|
2017-05-31 21:56:48 +00:00
|
|
|
update_eval_map(union trace_eval_map_item *ptr)
|
2015-03-31 21:23:45 +00:00
|
|
|
{
|
2017-05-31 21:56:43 +00:00
|
|
|
if (!ptr->map.eval_string) {
|
2015-03-31 21:23:45 +00:00
|
|
|
if (ptr->tail.next) {
|
|
|
|
ptr = ptr->tail.next;
|
|
|
|
/* Set ptr to the next real item (skip head) */
|
|
|
|
ptr++;
|
|
|
|
} else
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
return ptr;
|
|
|
|
}
|
|
|
|
|
2017-05-31 21:56:48 +00:00
|
|
|
static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
|
2015-03-31 21:23:45 +00:00
|
|
|
{
|
2017-05-31 21:56:45 +00:00
|
|
|
union trace_eval_map_item *ptr = v;
|
2015-03-31 21:23:45 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Paranoid! If ptr points to end, we don't want to increment past it.
|
|
|
|
* This really should never happen.
|
|
|
|
*/
|
2020-01-24 07:03:01 +00:00
|
|
|
(*pos)++;
|
2017-05-31 21:56:48 +00:00
|
|
|
ptr = update_eval_map(ptr);
|
2015-03-31 21:23:45 +00:00
|
|
|
if (WARN_ON_ONCE(!ptr))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
ptr++;
|
2017-05-31 21:56:48 +00:00
|
|
|
ptr = update_eval_map(ptr);
|
2015-03-31 21:23:45 +00:00
|
|
|
|
|
|
|
return ptr;
|
|
|
|
}
|
|
|
|
|
2017-05-31 21:56:48 +00:00
|
|
|
static void *eval_map_start(struct seq_file *m, loff_t *pos)
|
2015-03-31 21:23:45 +00:00
|
|
|
{
|
2017-05-31 21:56:45 +00:00
|
|
|
union trace_eval_map_item *v;
|
2015-03-31 21:23:45 +00:00
|
|
|
loff_t l = 0;
|
|
|
|
|
2017-05-31 21:56:46 +00:00
|
|
|
mutex_lock(&trace_eval_mutex);
|
2015-03-31 21:23:45 +00:00
|
|
|
|
2017-05-31 21:56:45 +00:00
|
|
|
v = trace_eval_maps;
|
2015-03-31 21:23:45 +00:00
|
|
|
if (v)
|
|
|
|
v++;
|
|
|
|
|
|
|
|
while (v && l < *pos) {
|
2017-05-31 21:56:48 +00:00
|
|
|
v = eval_map_next(m, v, &l);
|
2015-03-31 21:23:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return v;
|
|
|
|
}
|
|
|
|
|
2017-05-31 21:56:48 +00:00
|
|
|
static void eval_map_stop(struct seq_file *m, void *v)
|
2015-03-31 21:23:45 +00:00
|
|
|
{
|
2017-05-31 21:56:46 +00:00
|
|
|
mutex_unlock(&trace_eval_mutex);
|
2015-03-31 21:23:45 +00:00
|
|
|
}
|
|
|
|
|
2017-05-31 21:56:48 +00:00
|
|
|
static int eval_map_show(struct seq_file *m, void *v)
|
2015-03-31 21:23:45 +00:00
|
|
|
{
|
2017-05-31 21:56:45 +00:00
|
|
|
union trace_eval_map_item *ptr = v;
|
2015-03-31 21:23:45 +00:00
|
|
|
|
|
|
|
seq_printf(m, "%s %ld (%s)\n",
|
2017-05-31 21:56:43 +00:00
|
|
|
ptr->map.eval_string, ptr->map.eval_value,
|
2015-03-31 21:23:45 +00:00
|
|
|
ptr->map.system);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-05-31 21:56:48 +00:00
|
|
|
static const struct seq_operations tracing_eval_map_seq_ops = {
|
|
|
|
.start = eval_map_start,
|
|
|
|
.next = eval_map_next,
|
|
|
|
.stop = eval_map_stop,
|
|
|
|
.show = eval_map_show,
|
2015-03-31 21:23:45 +00:00
|
|
|
};
|
|
|
|
|
2017-05-31 21:56:48 +00:00
|
|
|
static int tracing_eval_map_open(struct inode *inode, struct file *filp)
|
2015-03-31 21:23:45 +00:00
|
|
|
{
|
2019-10-11 21:39:57 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = tracing_check_open_get_tr(NULL);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2015-03-31 21:23:45 +00:00
|
|
|
|
2017-05-31 21:56:48 +00:00
|
|
|
return seq_open(filp, &tracing_eval_map_seq_ops);
|
2015-03-31 21:23:45 +00:00
|
|
|
}
|
|
|
|
|
2017-05-31 21:56:48 +00:00
|
|
|
static const struct file_operations tracing_eval_map_fops = {
|
|
|
|
.open = tracing_eval_map_open,
|
2015-03-31 21:23:45 +00:00
|
|
|
.read = seq_read,
|
|
|
|
.llseek = seq_lseek,
|
|
|
|
.release = seq_release,
|
|
|
|
};
|
|
|
|
|
2017-05-31 21:56:45 +00:00
|
|
|
static inline union trace_eval_map_item *
|
2017-05-31 21:56:47 +00:00
|
|
|
trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
|
2015-03-31 21:23:45 +00:00
|
|
|
{
|
|
|
|
/* Return tail of array given the head */
|
|
|
|
return ptr + ptr->head.length + 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2017-05-31 21:56:48 +00:00
|
|
|
trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
|
2015-03-31 21:23:45 +00:00
|
|
|
int len)
|
|
|
|
{
|
2017-05-31 21:56:43 +00:00
|
|
|
struct trace_eval_map **stop;
|
|
|
|
struct trace_eval_map **map;
|
2017-05-31 21:56:45 +00:00
|
|
|
union trace_eval_map_item *map_array;
|
|
|
|
union trace_eval_map_item *ptr;
|
2015-03-31 21:23:45 +00:00
|
|
|
|
|
|
|
stop = start + len;
|
|
|
|
|
|
|
|
/*
|
2017-05-31 21:56:45 +00:00
|
|
|
* The trace_eval_maps contains the map plus a head and tail item,
|
2015-03-31 21:23:45 +00:00
|
|
|
* where the head holds the module and length of array, and the
|
|
|
|
* tail holds a pointer to the next list.
|
|
|
|
*/
|
treewide: kmalloc() -> kmalloc_array()
The kmalloc() function has a 2-factor argument form, kmalloc_array(). This
patch replaces cases of:
kmalloc(a * b, gfp)
with:
kmalloc_array(a, b, gfp)
as well as handling cases of:
kmalloc(a * b * c, gfp)
with:
kmalloc(array3_size(a, b, c), gfp)
as it's slightly less ugly than:
kmalloc_array(array_size(a, b), c, gfp)
This does, however, attempt to ignore constant size factors like:
kmalloc(4 * 1024, gfp)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
The tools/ directory was manually excluded, since it has its own
implementation of kmalloc().
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@
(
kmalloc(
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
kmalloc(
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@
(
kmalloc(
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(char) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (COUNT_ID)
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * COUNT_ID
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (COUNT_CONST)
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * COUNT_CONST
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (COUNT_ID)
+ COUNT_ID, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * COUNT_ID
+ COUNT_ID, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (COUNT_CONST)
+ COUNT_CONST, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * COUNT_CONST
+ COUNT_CONST, sizeof(THING)
, ...)
)
// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@
- kmalloc
+ kmalloc_array
(
- SIZE * COUNT
+ COUNT, SIZE
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
kmalloc(
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
kmalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kmalloc(
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@
(
kmalloc(
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
kmalloc(C1 * C2 * C3, ...)
|
kmalloc(
- (E1) * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- (E1) * (E2) * E3
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- (E1) * (E2) * (E3)
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@
(
kmalloc(sizeof(THING) * C2, ...)
|
kmalloc(sizeof(TYPE) * C2, ...)
|
kmalloc(C1 * C2 * C3, ...)
|
kmalloc(C1 * C2, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (E2)
+ E2, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * E2
+ E2, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (E2)
+ E2, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * E2
+ E2, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- (E1) * E2
+ E1, E2
, ...)
|
- kmalloc
+ kmalloc_array
(
- (E1) * (E2)
+ E1, E2
, ...)
|
- kmalloc
+ kmalloc_array
(
- E1 * E2
+ E1, E2
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
2018-06-12 20:55:00 +00:00
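As a minimal before/after illustration of what the semantic patch above does (a generic snippet with a hypothetical ptr/count, not code from this file):

	/* before: the open-coded multiplication can overflow unnoticed */
	ptr = kmalloc(count * sizeof(*ptr), GFP_KERNEL);

	/* after: kmalloc_array() returns NULL if count * sizeof(*ptr) would overflow */
	ptr = kmalloc_array(count, sizeof(*ptr), GFP_KERNEL);

The kmalloc_array() call annotated below with this commit is an instance of that conversion.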
|
|
|
map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
|
2015-03-31 21:23:45 +00:00
|
|
|
if (!map_array) {
|
2017-05-31 21:56:48 +00:00
|
|
|
pr_warn("Unable to allocate trace eval mapping\n");
|
2015-03-31 21:23:45 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-05-31 21:56:46 +00:00
|
|
|
mutex_lock(&trace_eval_mutex);
|
2015-03-31 21:23:45 +00:00
|
|
|
|
2017-05-31 21:56:45 +00:00
|
|
|
if (!trace_eval_maps)
|
|
|
|
trace_eval_maps = map_array;
|
2015-03-31 21:23:45 +00:00
|
|
|
else {
|
2017-05-31 21:56:45 +00:00
|
|
|
ptr = trace_eval_maps;
|
2015-03-31 21:23:45 +00:00
|
|
|
for (;;) {
|
2017-05-31 21:56:47 +00:00
|
|
|
ptr = trace_eval_jmp_to_tail(ptr);
|
2015-03-31 21:23:45 +00:00
|
|
|
if (!ptr->tail.next)
|
|
|
|
break;
|
|
|
|
ptr = ptr->tail.next;
|
|
|
|
|
|
|
|
}
|
|
|
|
ptr->tail.next = map_array;
|
|
|
|
}
|
|
|
|
map_array->head.mod = mod;
|
|
|
|
map_array->head.length = len;
|
|
|
|
map_array++;
|
|
|
|
|
|
|
|
for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
|
|
|
|
map_array->map = **map;
|
|
|
|
map_array++;
|
|
|
|
}
|
|
|
|
memset(map_array, 0, sizeof(*map_array));
|
|
|
|
|
2017-05-31 21:56:46 +00:00
|
|
|
mutex_unlock(&trace_eval_mutex);
|
2015-03-31 21:23:45 +00:00
|
|
|
}
|
|
|
|
|
2017-05-31 21:56:48 +00:00
|
|
|
static void trace_create_eval_file(struct dentry *d_tracer)
|
2015-03-31 21:23:45 +00:00
|
|
|
{
|
2017-05-31 21:56:53 +00:00
|
|
|
trace_create_file("eval_map", 0444, d_tracer,
|
2017-05-31 21:56:48 +00:00
|
|
|
NULL, &tracing_eval_map_fops);
|
2015-03-31 21:23:45 +00:00
|
|
|
}
|
|
|
|
|
2017-05-31 21:56:53 +00:00
|
|
|
#else /* CONFIG_TRACE_EVAL_MAP_FILE */
|
2017-05-31 21:56:48 +00:00
|
|
|
static inline void trace_create_eval_file(struct dentry *d_tracer) { }
|
|
|
|
static inline void trace_insert_eval_map_file(struct module *mod,
|
2017-05-31 21:56:43 +00:00
|
|
|
struct trace_eval_map **start, int len) { }
|
2017-05-31 21:56:53 +00:00
|
|
|
#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
|
2015-03-31 21:23:45 +00:00
|
|
|
|
2017-05-31 21:56:48 +00:00
|
|
|
static void trace_insert_eval_map(struct module *mod,
|
2017-05-31 21:56:43 +00:00
|
|
|
struct trace_eval_map **start, int len)
|
tracing: Add TRACE_DEFINE_ENUM() macro to map enums to their values
Several tracepoints use the helper functions __print_symbolic() or
__print_flags() and pass in enums that do the mapping between the
binary data stored and the value to print. This works well for reading
the ASCII trace files, but when the data is read via userspace tools
such as perf and trace-cmd, the conversion of the binary value to a
human string format is lost if an enum is used, as userspace does not
have access to what the ENUM is.
For example, the tracepoint trace_tlb_flush() has:
__print_symbolic(REC->reason,
{ TLB_FLUSH_ON_TASK_SWITCH, "flush on task switch" },
{ TLB_REMOTE_SHOOTDOWN, "remote shootdown" },
{ TLB_LOCAL_SHOOTDOWN, "local shootdown" },
{ TLB_LOCAL_MM_SHOOTDOWN, "local mm shootdown" })
Which maps the enum values to the strings they represent. But perf and
trace-cmd do not know what value TLB_LOCAL_MM_SHOOTDOWN is, and would
not be able to map it.
With TRACE_DEFINE_ENUM(), developers can place these in the event header
files and ftrace will convert the enums to their values:
By adding:
TRACE_DEFINE_ENUM(TLB_FLUSH_ON_TASK_SWITCH);
TRACE_DEFINE_ENUM(TLB_REMOTE_SHOOTDOWN);
TRACE_DEFINE_ENUM(TLB_LOCAL_SHOOTDOWN);
TRACE_DEFINE_ENUM(TLB_LOCAL_MM_SHOOTDOWN);
$ cat /sys/kernel/debug/tracing/events/tlb/tlb_flush/format
[...]
__print_symbolic(REC->reason,
{ 0, "flush on task switch" },
{ 1, "remote shootdown" },
{ 2, "local shootdown" },
{ 3, "local mm shootdown" })
The above is what userspace expects to see, and tools do not need to
be modified to parse them.
Link: http://lkml.kernel.org/r/20150403013802.220157513@goodmis.org
Cc: Guilherme Cox <cox@computer.org>
Cc: Tony Luck <tony.luck@gmail.com>
Cc: Xie XiuQi <xiexiuqi@huawei.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Reviewed-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Tested-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2015-03-24 21:58:09 +00:00
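A hedged sketch of how a tracepoint header might combine TRACE_DEFINE_ENUM() with __print_symbolic(); the event is modeled on the tlb_flush example quoted above and is illustrative only, not taken from this file:

	/* in the trace event header */
	TRACE_DEFINE_ENUM(TLB_REMOTE_SHOOTDOWN);

	TRACE_EVENT(tlb_flush,
		TP_PROTO(int reason, unsigned long pages),
		TP_ARGS(reason, pages),
		TP_STRUCT__entry(
			__field(int,		reason)
			__field(unsigned long,	pages)
		),
		TP_fast_assign(
			__entry->reason = reason;
			__entry->pages  = pages;
		),
		TP_printk("pages:%lu reason:%s", __entry->pages,
			  __print_symbolic(__entry->reason,
				{ TLB_REMOTE_SHOOTDOWN, "remote shootdown" }))
	);

With the TRACE_DEFINE_ENUM() line present, the event's "format" file exposes the numeric value of TLB_REMOTE_SHOOTDOWN instead of the bare enum name, so perf and trace-cmd can decode it.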
|
|
|
{
|
2017-05-31 21:56:43 +00:00
|
|
|
struct trace_eval_map **map;
|
tracing: Add TRACE_DEFINE_ENUM() macro to map enums to their values
2015-03-24 21:58:09 +00:00
|
|
|
|
|
|
|
if (len <= 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
map = start;
|
|
|
|
|
2017-05-31 21:56:48 +00:00
|
|
|
trace_event_eval_update(map, len);
|
2015-03-31 21:23:45 +00:00
|
|
|
|
2017-05-31 21:56:48 +00:00
|
|
|
trace_insert_eval_map_file(mod, start, len);
|
tracing: Add TRACE_DEFINE_ENUM() macro to map enums to their values
2015-03-24 21:58:09 +00:00
|
|
|
}
|
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
static ssize_t
|
|
|
|
tracing_set_trace_read(struct file *filp, char __user *ubuf,
|
|
|
|
size_t cnt, loff_t *ppos)
|
|
|
|
{
|
2012-05-11 17:29:49 +00:00
|
|
|
struct trace_array *tr = filp->private_data;
|
2009-09-18 06:06:47 +00:00
|
|
|
char buf[MAX_TRACER_SIZE+2];
|
2008-05-12 19:20:42 +00:00
|
|
|
int r;
|
|
|
|
|
|
|
|
mutex_lock(&trace_types_lock);
|
2012-05-11 17:29:49 +00:00
|
|
|
r = sprintf(buf, "%s\n", tr->current_trace->name);
|
2008-05-12 19:20:42 +00:00
|
|
|
mutex_unlock(&trace_types_lock);
|
|
|
|
|
2008-05-12 19:20:46 +00:00
|
|
|
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
|
|
|
|
2009-02-05 20:02:00 +00:00
|
|
|
int tracer_init(struct tracer *t, struct trace_array *tr)
|
|
|
|
{
|
2020-01-09 23:53:48 +00:00
|
|
|
tracing_reset_online_cpus(&tr->array_buffer);
|
2009-02-05 20:02:00 +00:00
|
|
|
return t->init(tr);
|
|
|
|
}
|
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
|
2012-02-02 20:00:41 +00:00
|
|
|
{
|
|
|
|
int cpu;
|
2013-03-06 02:13:47 +00:00
|
|
|
|
2012-02-02 20:00:41 +00:00
|
|
|
for_each_tracing_cpu(cpu)
|
tracing: Consolidate max_tr into main trace_array structure
Currently, the way the latency tracers and snapshot feature works
is to have a separate trace_array called "max_tr" that holds the
snapshot buffer. For latency tracers, this snapshot buffer is used
to swap the running buffer with this buffer to save the current max
latency.
The only items needed for the max_tr are really just a copy of the buffer
itself, the per_cpu data pointers, the time_start timestamp that states
when the max latency was triggered, and the cpu that the max latency
was triggered on. All other fields in trace_array are unused by the
max_tr, making the max_tr mostly bloat.
This change removes the max_tr completely, and adds a new structure
called trace_buffer, that holds the buffer pointer, the per_cpu data
pointers, the time_start timestamp, and the cpu where the latency occurred.
The trace_array now has two trace_buffers, one for the normal trace and
one for the max trace or snapshot. By doing this, not only do we remove
the bloat from the max_tr, but trace instances can now use their own
snapshot feature rather than only the top-level global_trace having the
snapshot feature and latency tracers for itself.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2013-03-05 14:24:35 +00:00
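A rough sketch of the layout this change describes; field names follow the commit message (in later sources the structure appears as array_buffer) and may not match this file exactly:

	struct trace_buffer {				/* per-trace_array buffer bundle */
		struct ring_buffer		*buffer;	/* the ring buffer itself */
		struct trace_array_cpu __percpu	*data;		/* per-cpu bookkeeping */
		u64				time_start;	/* when the max latency hit */
		int				cpu;		/* cpu of the max latency */
	};

	struct trace_array {
		/* ... */
		struct trace_buffer	trace_buffer;	/* normal trace */
	#ifdef CONFIG_TRACER_MAX_TRACE
		struct trace_buffer	max_buffer;	/* snapshot / max latency */
	#endif
		/* ... */
	};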
|
|
|
per_cpu_ptr(buf->data, cpu)->entries = val;
|
2012-02-02 20:00:41 +00:00
|
|
|
}
|
|
|
|
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
#ifdef CONFIG_TRACER_MAX_TRACE
|
2012-10-17 02:56:16 +00:00
|
|
|
/* resize @tr's buffer to the size of @size_tr's entries */
|
2020-01-09 23:53:48 +00:00
|
|
|
static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
|
|
|
|
struct array_buffer *size_buf, int cpu_id)
|
2012-10-17 02:56:16 +00:00
|
|
|
{
|
|
|
|
int cpu, ret = 0;
|
|
|
|
|
|
|
|
if (cpu_id == RING_BUFFER_ALL_CPUS) {
|
|
|
|
for_each_tracing_cpu(cpu) {
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
ret = ring_buffer_resize(trace_buf->buffer,
|
|
|
|
per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
|
2012-10-17 02:56:16 +00:00
|
|
|
if (ret < 0)
|
|
|
|
break;
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
per_cpu_ptr(trace_buf->data, cpu)->entries =
|
|
|
|
per_cpu_ptr(size_buf->data, cpu)->entries;
|
2012-10-17 02:56:16 +00:00
|
|
|
}
|
|
|
|
} else {
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
ret = ring_buffer_resize(trace_buf->buffer,
|
|
|
|
per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
|
2012-10-17 02:56:16 +00:00
|
|
|
if (ret == 0)
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
per_cpu_ptr(trace_buf->data, cpu_id)->entries =
|
|
|
|
per_cpu_ptr(size_buf->data, cpu_id)->entries;
|
2012-10-17 02:56:16 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
#endif /* CONFIG_TRACER_MAX_TRACE */
|
2012-10-17 02:56:16 +00:00
|
|
|
|
2012-05-11 17:29:49 +00:00
|
|
|
static int __tracing_resize_ring_buffer(struct trace_array *tr,
|
|
|
|
unsigned long size, int cpu)
|
2009-03-11 17:42:01 +00:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If kernel or user changes the size of the ring buffer
|
2009-03-12 15:21:08 +00:00
|
|
|
* we use the size that was given, and we can forget about
|
|
|
|
* expanding it later.
|
2009-03-11 17:42:01 +00:00
|
|
|
*/
|
2013-03-08 03:48:09 +00:00
|
|
|
ring_buffer_expanded = true;
|
2009-03-11 17:42:01 +00:00
|
|
|
|
2012-10-11 01:44:34 +00:00
|
|
|
/* May be called before buffers are initialized */
|
2020-01-09 23:53:48 +00:00
|
|
|
if (!tr->array_buffer.buffer)
|
2012-10-11 01:44:34 +00:00
|
|
|
return 0;
|
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
|
2009-03-11 17:42:01 +00:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
#ifdef CONFIG_TRACER_MAX_TRACE
|
2012-05-11 17:29:49 +00:00
|
|
|
if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
|
|
|
|
!tr->current_trace->use_max_tr)
|
2010-07-01 05:34:35 +00:00
|
|
|
goto out;
|
|
|
|
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
|
2009-03-11 17:42:01 +00:00
|
|
|
if (ret < 0) {
|
2020-01-09 23:53:48 +00:00
|
|
|
int r = resize_buffer_duplicate_size(&tr->array_buffer,
|
|
|
|
&tr->array_buffer, cpu);
|
2009-03-11 17:42:01 +00:00
|
|
|
if (r < 0) {
|
2009-03-12 15:21:08 +00:00
|
|
|
/*
|
|
|
|
* AARGH! We are left with different
|
|
|
|
* size max buffer!!!!
|
|
|
|
* The max buffer is our "snapshot" buffer.
|
|
|
|
* When a tracer needs a snapshot (one of the
|
|
|
|
* latency tracers), it swaps the max buffer
|
|
|
|
* with the saved snap shot. We succeeded to
|
|
|
|
* update the size of the main buffer, but failed to
|
|
|
|
* update the size of the max buffer. But when we tried
|
|
|
|
* to reset the main buffer to the original size, we
|
|
|
|
* failed there too. This is very unlikely to
|
|
|
|
* happen, but if it does, warn and kill all
|
|
|
|
* tracing.
|
|
|
|
*/
|
2009-03-11 17:42:01 +00:00
|
|
|
WARN_ON(1);
|
|
|
|
tracing_disabled = 1;
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2012-02-02 20:00:41 +00:00
|
|
|
if (cpu == RING_BUFFER_ALL_CPUS)
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
set_buffer_entries(&tr->max_buffer, size);
|
2012-02-02 20:00:41 +00:00
|
|
|
else
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
|
2012-02-02 20:00:41 +00:00
|
|
|
|
2010-07-01 05:34:35 +00:00
|
|
|
out:
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
#endif /* CONFIG_TRACER_MAX_TRACE */
|
|
|
|
|
2012-02-02 20:00:41 +00:00
|
|
|
if (cpu == RING_BUFFER_ALL_CPUS)
|
2020-01-09 23:53:48 +00:00
|
|
|
set_buffer_entries(&tr->array_buffer, size);
|
2012-02-02 20:00:41 +00:00
|
|
|
else
|
2020-01-09 23:53:48 +00:00
|
|
|
per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
|
2009-03-11 17:42:01 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2020-01-10 16:06:17 +00:00
|
|
|
ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
|
|
|
|
unsigned long size, int cpu_id)
|
2011-06-14 00:51:57 +00:00
|
|
|
{
|
2012-05-04 01:59:50 +00:00
|
|
|
int ret = size;
|
2011-06-14 00:51:57 +00:00
|
|
|
|
|
|
|
mutex_lock(&trace_types_lock);
|
|
|
|
|
2012-02-02 20:00:41 +00:00
|
|
|
if (cpu_id != RING_BUFFER_ALL_CPUS) {
|
|
|
|
/* make sure, this cpu is enabled in the mask */
|
|
|
|
if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
2011-06-14 00:51:57 +00:00
|
|
|
|
2012-05-11 17:29:49 +00:00
|
|
|
ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
|
2011-06-14 00:51:57 +00:00
|
|
|
if (ret < 0)
|
|
|
|
ret = -ENOMEM;
|
|
|
|
|
2012-02-02 20:00:41 +00:00
|
|
|
out:
|
2011-06-14 00:51:57 +00:00
|
|
|
mutex_unlock(&trace_types_lock);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2010-07-01 05:34:35 +00:00
|
|
|
|
2009-03-11 18:33:00 +00:00
|
|
|
/**
|
|
|
|
* tracing_update_buffers - used by tracing facility to expand ring buffers
|
|
|
|
*
|
|
|
|
* To save on memory when the tracing is never used on a system with it
|
|
|
|
* configured in. The ring buffers are set to a minimum size. But once
|
|
|
|
* a user starts to use the tracing facility, then they need to grow
|
|
|
|
* to their default size.
|
|
|
|
*
|
|
|
|
* This function is to be called when a tracer is about to be used.
|
|
|
|
*/
|
|
|
|
int tracing_update_buffers(void)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
|
2009-03-12 15:33:20 +00:00
|
|
|
mutex_lock(&trace_types_lock);
|
2009-03-11 18:33:00 +00:00
|
|
|
if (!ring_buffer_expanded)
|
2012-05-11 17:29:49 +00:00
|
|
|
ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
|
2012-02-02 20:00:41 +00:00
|
|
|
RING_BUFFER_ALL_CPUS);
|
2009-03-12 15:33:20 +00:00
|
|
|
mutex_unlock(&trace_types_lock);
|
2009-03-11 18:33:00 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2009-02-27 04:43:05 +00:00
|
|
|
struct trace_option_dentry;
|
|
|
|
|
2015-09-30 18:27:31 +00:00
|
|
|
static void
|
2012-05-11 17:29:49 +00:00
|
|
|
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
|
2009-02-27 04:43:05 +00:00
|
|
|
|
2014-01-14 13:43:01 +00:00
|
|
|
/*
|
|
|
|
* Used to clear out the tracer before deletion of an instance.
|
|
|
|
* Must have trace_types_lock held.
|
|
|
|
*/
|
|
|
|
static void tracing_set_nop(struct trace_array *tr)
|
|
|
|
{
|
|
|
|
if (tr->current_trace == &nop_trace)
|
|
|
|
return;
|
|
|
|
|
2014-01-14 13:52:35 +00:00
|
|
|
tr->current_trace->enabled--;
|
2014-01-14 13:43:01 +00:00
|
|
|
|
|
|
|
if (tr->current_trace->reset)
|
|
|
|
tr->current_trace->reset(tr);
|
|
|
|
|
|
|
|
tr->current_trace = &nop_trace;
|
|
|
|
}
|
|
|
|
|
2015-09-29 21:31:55 +00:00
|
|
|
static void add_tracer_options(struct trace_array *tr, struct tracer *t)
|
2008-05-12 19:20:42 +00:00
|
|
|
{
|
2015-02-03 17:45:53 +00:00
|
|
|
/* Only enable if the directory has been created already. */
|
|
|
|
if (!tr->dir)
|
|
|
|
return;
|
|
|
|
|
2015-09-30 18:27:31 +00:00
|
|
|
create_trace_option_files(tr, t);
|
2015-02-03 17:45:53 +00:00
|
|
|
}
|
|
|
|
|
2020-01-10 16:06:17 +00:00
|
|
|
int tracing_set_tracer(struct trace_array *tr, const char *buf)
|
2015-02-03 17:45:53 +00:00
|
|
|
{
|
2008-05-12 19:20:42 +00:00
|
|
|
struct tracer *t;
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
#ifdef CONFIG_TRACER_MAX_TRACE
|
2013-01-22 18:35:11 +00:00
|
|
|
bool had_max_tr;
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
#endif
|
2008-11-01 18:57:37 +00:00
|
|
|
int ret = 0;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2009-03-12 15:33:20 +00:00
|
|
|
mutex_lock(&trace_types_lock);
|
|
|
|
|
2009-03-11 17:42:01 +00:00
|
|
|
if (!ring_buffer_expanded) {
|
2012-05-11 17:29:49 +00:00
|
|
|
ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
|
2012-02-02 20:00:41 +00:00
|
|
|
RING_BUFFER_ALL_CPUS);
|
2009-03-11 17:42:01 +00:00
|
|
|
if (ret < 0)
|
2009-03-15 21:10:39 +00:00
|
|
|
goto out;
|
2009-03-11 17:42:01 +00:00
|
|
|
ret = 0;
|
|
|
|
}
|
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
for (t = trace_types; t; t = t->next) {
|
|
|
|
if (strcmp(t->name, buf) == 0)
|
|
|
|
break;
|
|
|
|
}
|
2008-10-04 20:04:44 +00:00
|
|
|
if (!t) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
2012-05-11 17:29:49 +00:00
|
|
|
if (t == tr->current_trace)
|
2008-05-12 19:20:42 +00:00
|
|
|
goto out;
|
|
|
|
|
tracing: Add conditional snapshot
Currently, tracing snapshots are context-free - they capture the ring
buffer contents at the time the tracing_snapshot() function was
invoked, and nothing else. Additionally, they're always taken
unconditionally - the calling code can decide whether or not to take a
snapshot, but the data used to make that decision is kept separately
from the snapshot itself.
This change adds the ability to associate with each trace instance
some user data, along with an 'update' function that can use that data
to determine whether or not to actually take a snapshot. The update
function can then update that data along with any other state (as part
of the data presumably), if warranted.
Because snapshots are 'global' per-instance, only one user can enable
and use a conditional snapshot for any given trace instance. To
enable a conditional snapshot (see details in the function and data
structure comments), the user calls tracing_snapshot_cond_enable().
Similarly, to disable a conditional snapshot and free it up for other
users, tracing_snapshot_cond_disable() should be called.
To actually initiate a conditional snapshot, tracing_snapshot_cond()
should be called. tracing_snapshot_cond() will invoke the update()
callback, allowing the user to decide whether or not to actually take
the snapshot and update the user-defined data associated with the
snapshot. If the callback returns 'true', tracing_snapshot_cond()
will then actually take the snapshot and return.
This scheme allows for flexibility in snapshot implementations - for
example, by implementing slightly different update() callbacks,
snapshots can be taken in situations where the user is only interested
in taking a snapshot when a new maximum is hit versus when a value
changes in any way at all. Future patches will demonstrate both
cases.
Link: http://lkml.kernel.org/r/1bea07828d5fd6864a585f83b1eed47ce097eb45.1550100284.git.tom.zanussi@linux.intel.com
Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
2019-02-13 23:42:45 +00:00
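A hedged usage sketch of the conditional-snapshot interface described above; the state structure and the update callback are illustrative, and the exact function signatures may differ from the final code:

	struct max_state {
		u64 current_val;
		u64 max_seen;
	};

	/* update() callback: decide whether the snapshot should really be taken */
	static bool max_update(struct trace_array *tr, void *cond_data)
	{
		struct max_state *s = cond_data;

		if (s->current_val <= s->max_seen)
			return false;			/* not a new maximum: skip */

		s->max_seen = s->current_val;		/* update the user data */
		return true;				/* and take the snapshot */
	}

	static struct max_state state;

	/* setup (one user per instance):  tracing_snapshot_cond_enable(tr, &state, max_update);
	 * per sample:                     state.current_val = val;
	 *                                 tracing_snapshot_cond(tr, &state);
	 * teardown:                       tracing_snapshot_cond_disable(tr);
	 */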
|
|
|
#ifdef CONFIG_TRACER_SNAPSHOT
|
|
|
|
if (t->use_max_tr) {
|
|
|
|
arch_spin_lock(&tr->max_lock);
|
|
|
|
if (tr->cond_snapshot)
|
|
|
|
ret = -EBUSY;
|
|
|
|
arch_spin_unlock(&tr->max_lock);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
#endif
|
2017-09-11 06:26:35 +00:00
|
|
|
/* Some tracers won't work on kernel command line */
|
|
|
|
if (system_state < SYSTEM_RUNNING && t->noboot) {
|
|
|
|
pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
|
|
|
|
t->name);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2013-11-07 03:42:48 +00:00
|
|
|
/* Some tracers are only allowed for the top level buffer */
|
|
|
|
if (!trace_ok_for_array(t, tr)) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2014-12-16 01:13:31 +00:00
|
|
|
/* If trace pipe files are being read, we can't change the tracer */
|
2020-06-30 03:45:56 +00:00
|
|
|
if (tr->trace_ref) {
|
2014-12-16 01:13:31 +00:00
|
|
|
ret = -EBUSY;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2008-11-12 20:24:24 +00:00
|
|
|
trace_branch_disable();
|
2013-03-14 19:03:53 +00:00
|
|
|
|
2014-01-14 13:52:35 +00:00
|
|
|
tr->current_trace->enabled--;
|
2013-03-14 19:03:53 +00:00
|
|
|
|
2012-05-11 17:29:49 +00:00
|
|
|
if (tr->current_trace->reset)
|
|
|
|
tr->current_trace->reset(tr);
|
2013-01-22 18:35:11 +00:00
|
|
|
|
2018-11-07 02:44:52 +00:00
|
|
|
/* Current trace needs to be nop_trace before synchronize_rcu */
|
2012-05-11 17:29:49 +00:00
|
|
|
tr->current_trace = &nop_trace;
|
2013-01-22 18:35:11 +00:00
|
|
|
|
2013-03-05 23:25:02 +00:00
|
|
|
#ifdef CONFIG_TRACER_MAX_TRACE
|
|
|
|
had_max_tr = tr->allocated_snapshot;
|
2013-01-22 18:35:11 +00:00
|
|
|
|
|
|
|
if (had_max_tr && !t->use_max_tr) {
|
|
|
|
/*
|
|
|
|
* We need to make sure that the update_max_tr sees that
|
|
|
|
* current_trace changed to nop_trace to keep it from
|
|
|
|
* swapping the buffers after we resize it.
|
|
|
|
* The update_max_tr is called from interrupts disabled
|
|
|
|
* so a synchronized_sched() is sufficient.
|
|
|
|
*/
|
2018-11-07 02:44:52 +00:00
|
|
|
synchronize_rcu();
|
2013-03-12 15:17:54 +00:00
|
|
|
free_snapshot(tr);
|
2010-07-01 05:34:35 +00:00
|
|
|
}
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef CONFIG_TRACER_MAX_TRACE
|
2013-01-22 18:35:11 +00:00
|
|
|
if (t->use_max_tr && !had_max_tr) {
|
2018-05-28 14:56:36 +00:00
|
|
|
ret = tracing_alloc_snapshot_instance(tr);
|
2012-10-17 02:56:16 +00:00
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
2010-07-01 05:34:35 +00:00
|
|
|
}
|
tracing: Consolidate max_tr into main trace_array structure
2013-03-05 14:24:35 +00:00
|
|
|
#endif
|
2009-02-27 04:43:05 +00:00
|
|
|
|
2008-11-16 04:57:26 +00:00
|
|
|
if (t->init) {
|
2009-02-05 20:02:00 +00:00
|
|
|
ret = tracer_init(t, tr);
|
2008-11-16 04:57:26 +00:00
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
}
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2012-05-11 17:29:49 +00:00
|
|
|
tr->current_trace = t;
|
2014-01-14 13:52:35 +00:00
|
|
|
tr->current_trace->enabled++;
|
2008-11-12 20:24:24 +00:00
|
|
|
trace_branch_enable(tr);
|
2008-05-12 19:20:42 +00:00
|
|
|
out:
|
|
|
|
mutex_unlock(&trace_types_lock);
|
|
|
|
|
2008-11-01 18:57:37 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
|
|
|
|
size_t cnt, loff_t *ppos)
|
|
|
|
{
|
2013-11-07 03:42:48 +00:00
|
|
|
struct trace_array *tr = filp->private_data;
|
2009-09-18 06:06:47 +00:00
|
|
|
char buf[MAX_TRACER_SIZE+1];
|
2008-11-01 18:57:37 +00:00
|
|
|
int i;
|
|
|
|
size_t ret;
|
2008-11-16 04:53:19 +00:00
|
|
|
int err;
|
|
|
|
|
|
|
|
ret = cnt;
|
2008-11-01 18:57:37 +00:00
|
|
|
|
2009-09-18 06:06:47 +00:00
|
|
|
if (cnt > MAX_TRACER_SIZE)
|
|
|
|
cnt = MAX_TRACER_SIZE;
|
2008-11-01 18:57:37 +00:00
|
|
|
|
2016-04-18 07:23:29 +00:00
|
|
|
if (copy_from_user(buf, ubuf, cnt))
|
2008-11-01 18:57:37 +00:00
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
buf[cnt] = 0;
|
|
|
|
|
|
|
|
/* strip ending whitespace. */
|
|
|
|
for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
|
|
|
|
buf[i] = 0;
|
|
|
|
|
2013-11-07 03:42:48 +00:00
|
|
|
err = tracing_set_tracer(tr, buf);
|
2008-11-16 04:53:19 +00:00
|
|
|
if (err)
|
|
|
|
return err;
|
2008-11-01 18:57:37 +00:00
|
|
|
|
2009-10-23 23:36:16 +00:00
|
|
|
*ppos += ret;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2008-10-04 20:04:44 +00:00
|
|
|
return ret;
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
2014-07-18 11:17:27 +00:00
|
|
|
tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
|
|
|
|
size_t cnt, loff_t *ppos)
|
2008-05-12 19:20:42 +00:00
|
|
|
{
|
|
|
|
char buf[64];
|
|
|
|
int r;
|
|
|
|
|
2008-05-12 19:21:00 +00:00
|
|
|
r = snprintf(buf, sizeof(buf), "%ld\n",
|
2008-05-12 19:20:42 +00:00
|
|
|
*ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
|
2008-05-12 19:21:00 +00:00
|
|
|
if (r > sizeof(buf))
|
|
|
|
r = sizeof(buf);
|
2008-05-12 19:20:46 +00:00
|
|
|
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
2014-07-18 11:17:27 +00:00
|
|
|
tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
|
|
|
|
size_t cnt, loff_t *ppos)
|
2008-05-12 19:20:42 +00:00
|
|
|
{
|
2009-02-10 18:44:34 +00:00
|
|
|
unsigned long val;
|
2008-05-12 19:21:00 +00:00
|
|
|
int ret;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2011-06-07 19:58:27 +00:00
|
|
|
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
|
|
|
|
if (ret)
|
2008-05-12 19:21:00 +00:00
|
|
|
return ret;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
|
|
|
*ptr = val * 1000;
|
|
|
|
|
|
|
|
return cnt;
|
|
|
|
}
|
|
|
|
|
2014-07-18 11:17:27 +00:00
|
|
|
static ssize_t
|
|
|
|
tracing_thresh_read(struct file *filp, char __user *ubuf,
|
|
|
|
size_t cnt, loff_t *ppos)
|
|
|
|
{
|
|
|
|
return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
tracing_thresh_write(struct file *filp, const char __user *ubuf,
|
|
|
|
size_t cnt, loff_t *ppos)
|
|
|
|
{
|
|
|
|
struct trace_array *tr = filp->private_data;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
mutex_lock(&trace_types_lock);
|
|
|
|
ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (tr->current_trace->update_thresh) {
|
|
|
|
ret = tr->current_trace->update_thresh(tr);
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = cnt;
|
|
|
|
out:
|
|
|
|
mutex_unlock(&trace_types_lock);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-09-07 16:45:09 +00:00
|
|
|
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
|
2015-11-09 21:15:15 +00:00
|
|
|
|
2014-07-18 11:17:27 +00:00
|
|
|
static ssize_t
|
|
|
|
tracing_max_lat_read(struct file *filp, char __user *ubuf,
|
|
|
|
size_t cnt, loff_t *ppos)
|
|
|
|
{
|
|
|
|
return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
|
|
|
|
size_t cnt, loff_t *ppos)
|
|
|
|
{
|
|
|
|
return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
|
|
|
|
}
|
|
|
|
|
2015-11-09 21:15:15 +00:00
|
|
|
#endif
|
|
|
|
|
2008-05-12 19:20:46 +00:00
|
|
|
static int tracing_open_pipe(struct inode *inode, struct file *filp)
|
|
|
|
{
|
2013-07-23 15:25:57 +00:00
|
|
|
struct trace_array *tr = inode->i_private;
|
2008-05-12 19:20:46 +00:00
|
|
|
struct trace_iterator *iter;
|
tracing: Add tracing_check_open_get_tr()
Currently, most files in the tracefs directory test if tracing_disabled is
set. If so, it should return -ENODEV. The tracing_disabled flag is set when
tracing is found to be broken. Originally it was done in case the ring
buffer was found to be corrupted, and we wanted to prevent reading it from
crashing the kernel. But it's also called if a tracing selftest fails on
boot. It's a one way switch. That is, once it is triggered, tracing is
disabled until reboot.
As most tracefs files can also be used by instances in the tracefs
directory, they need to be handled carefully. Each instance has a trace_array
associated to it, and when the instance is removed, the trace_array is
freed. But if an instance is opened with a reference to the trace_array,
then it requires looking up the trace_array to get its ref counter (as there
could be a race with it being deleted and the open itself). Once it is
found, a reference is added to prevent the instance from being removed (and
the trace_array associated with it freed).
Combine the two checks (tracing_disabled and trace_array_get()) into a
single helper function. This will also make it easier to add lockdown to
tracefs later.
Link: http://lkml.kernel.org/r/20191011135458.7399da44@gandalf.local.home
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
2019-10-11 21:39:57 +00:00
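A hedged sketch of the combined helper this change introduces, as described in the message above (shown without the later lockdown hook; not necessarily the verbatim implementation):

	int tracing_check_open_get_tr(struct trace_array *tr)
	{
		if (tracing_disabled)
			return -ENODEV;

		/* pin the instance so it cannot be removed while the file is open */
		if (tr && trace_array_get(tr) < 0)
			return -ENODEV;

		return 0;
	}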
|
|
|
int ret;
|
2008-05-12 19:20:46 +00:00
|
|
|
|
tracing: Add tracing_check_open_get_tr()
2019-10-11 21:39:57 +00:00
|
|
|
ret = tracing_check_open_get_tr(tr);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2013-07-02 03:34:22 +00:00
|
|
|
|
tracing/core: introduce per cpu tracing files
Impact: split up tracing output per cpu
Currently, on the tracing debugfs directory, three files are
available to the user to let them extract the trace output:
- trace is an iterator through the ring-buffer. It's a reader
but not a consumer. It doesn't block when no more traces are
available.
- trace, pretty similar to the former, except that it adds more
information such as preempt count, irq flags, ...
- trace_pipe is a reader and a consumer; it will also block
waiting for traces if necessary (heh, yes it's a pipe).
The traces coming from different cpus are currently mixed up
inside these files. Sometimes it messes up the information,
sometimes it's useful, depending on what the tracer captures.
The tracing_cpumask file is useful to filter the output and
select only the traces captured on a custom-defined set of cpus.
But it is still not powerful enough to extract one trace buffer
per cpu at the same time.
So this patch creates a new directory: /debug/tracing/per_cpu/.
Inside this directory, you will now find one trace_pipe file and
one trace file per cpu.
Which means if you have two cpus, you will have:
trace0
trace1
trace_pipe0
trace_pipe1
And of course, reading these files will have the same effect
as with the usual tracing files, except that you will only see
the traces from the given cpu.
The original all-in-one cpu trace files are still available in
their original place.
Until now, only one consumer was allowed on trace_pipe to avoid
racy consumption of the ring-buffer. Now the approach has changed
a bit: you can have only one consumer per cpu.
Which means you are allowed to read trace_pipe0 and trace_pipe1
concurrently, but you can't have two readers on trace_pipe0 or
trace_pipe1.
Following the same logic, if there is one reader on the common
trace_pipe, you cannot at the same time have another reader on
trace_pipe0 or trace_pipe1, because trace_pipe is in essence
already a consumer of all cpu buffers.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-02-25 02:22:28 +00:00
|
|
|
mutex_lock(&trace_types_lock);
|
|
|
|
|
2008-05-12 19:20:46 +00:00
|
|
|
/* create a buffer to store the information to pass to userspace */
|
|
|
|
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
|
tracing/core: introduce per cpu tracing files
2009-02-25 02:22:28 +00:00
|
|
|
if (!iter) {
|
|
|
|
ret = -ENOMEM;
|
2013-07-18 18:18:44 +00:00
|
|
|
__trace_array_put(tr);
|
tracing/core: introduce per cpu tracing files
2009-02-25 02:22:28 +00:00
|
|
|
goto out;
|
|
|
|
}
|
2008-05-12 19:20:46 +00:00
|
|
|
|
2014-06-25 19:54:42 +00:00
|
|
|
trace_seq_init(&iter->seq);
|
2014-12-16 03:31:07 +00:00
|
|
|
iter->trace = tr->current_trace;
|
tracing/core: make the read callbacks reentrants
Now that several per-cpu files can be read or spliced at the
same time, we want the read/splice callbacks for tracing files
to be reentrant.
Until now, a single global mutex (trace_types_lock) serialized
the access to tracing_read_pipe(), tracing_splice_read_pipe(),
and the seq helpers.
I.e., if a user tries to read trace_pipe0 and trace_pipe1 at
the same time, the access to the function tracing_read_pipe()
is contended and one reader must wait for the other to finish
its read call.
The trace_types_lock mutex is mostly here to serialize the access
to the global current tracer (current_trace), which can be
changed concurrently. Although the iter struct keeps a private
pointer to this tracer, its callbacks can be changed by another
function.
The method used here is to no longer keep a private reference to
the tracer inside the iterator but to make a copy of it inside
the iterator. Subsequent read calls then check whether the
tracer has changed. This is not costly because the current
tracer is not expected to change often, so we rely on branch
prediction for that.
Moreover, we add a private mutex to the iterator (there is one
iterator per file descriptor) to serialize the accesses in case
of multiple consumers per file descriptor (which would be a
silly idea from the user). Note that this is not to protect the
ring buffer, since the ring buffer already serializes the
readers' accesses. This is to prevent trace weirdness in case
of concurrent consumers. But these mutexes can be dropped
anyway; that would not result in any crash. Just tell me what
you think about it.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-02-25 05:13:16 +00:00
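Note that the code in this file now simply stores a pointer (iter->trace = tr->current_trace, below); the sketch that follows only illustrates the copy-and-compare approach the commit message describes. The helper name is hypothetical and the details are simplified, not the exact kernel implementation.

/* Hypothetical helper sketching the pattern described above: the iterator
 * owns a copy of the tracer and lazily refreshes it when the global tracer
 * changes, so the expensive copy happens only on that rare path. */
static void refresh_iter_tracer(struct trace_iterator *iter)
{
	mutex_lock(&trace_types_lock);
	/* Switching tracers is rare, so this branch is almost never taken. */
	if (unlikely(iter->trace->name != iter->tr->current_trace->name))
		*iter->trace = *iter->tr->current_trace;
	mutex_unlock(&trace_types_lock);
}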
|
|
|
|
2008-12-31 23:42:23 +00:00
|
|
|
if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
|
tracing/core: introduce per cpu tracing files
2009-02-25 02:22:28 +00:00
|
|
|
ret = -ENOMEM;
|
tracing/core: make the read callbacks reentrants
2009-02-25 05:13:16 +00:00
|
|
|
goto fail;
|
2008-12-31 23:42:23 +00:00
|
|
|
}
|
|
|
|
|
2008-11-08 03:36:02 +00:00
|
|
|
/* trace pipe does not show start of buffer */
|
2008-12-31 23:42:23 +00:00
|
|
|
cpumask_setall(iter->started);
|
2008-11-08 03:36:02 +00:00
|
|
|
|
2015-09-30 13:42:05 +00:00
|
|
|
if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
|
2009-06-01 19:16:05 +00:00
|
|
|
iter->iter_flags |= TRACE_FILE_LAT_FMT;
|
|
|
|
|
2012-11-13 20:18:22 +00:00
|
|
|
/* Output in nanoseconds only if we are using a clock in nanoseconds. */
|
2013-04-23 01:32:39 +00:00
|
|
|
if (trace_clocks[tr->clock_id].in_ns)
|
2012-11-13 20:18:22 +00:00
|
|
|
iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
|
|
|
|
|
2013-07-23 15:25:57 +00:00
|
|
|
iter->tr = tr;
|
2020-01-09 23:53:48 +00:00
|
|
|
iter->array_buffer = &tr->array_buffer;
|
2013-07-23 15:25:57 +00:00
|
|
|
iter->cpu_file = tracing_get_cpu(inode);
|
tracing/core: make the read callbacks reentrants
2009-02-25 05:13:16 +00:00
|
|
|
mutex_init(&iter->mutex);
|
2008-05-12 19:20:46 +00:00
|
|
|
filp->private_data = iter;
|
|
|
|
|
2008-05-12 19:21:01 +00:00
|
|
|
if (iter->trace->pipe_open)
|
|
|
|
iter->trace->pipe_open(iter);
|
|
|
|
|
2010-07-07 21:40:11 +00:00
|
|
|
nonseekable_open(inode, filp);
|
2014-12-16 01:13:31 +00:00
|
|
|
|
2020-06-30 03:45:56 +00:00
|
|
|
tr->trace_ref++;
|
tracing/core: introduce per cpu tracing files
2009-02-25 02:22:28 +00:00
|
|
|
out:
|
|
|
|
mutex_unlock(&trace_types_lock);
|
|
|
|
return ret;
|
tracing/core: make the read callbacks reentrants
2009-02-25 05:13:16 +00:00
|
|
|
|
|
|
|
fail:
|
|
|
|
kfree(iter);
|
2013-07-02 03:34:22 +00:00
|
|
|
__trace_array_put(tr);
|
tracing/core: make the read callbacks reentrants
2009-02-25 05:13:16 +00:00
|
|
|
mutex_unlock(&trace_types_lock);
|
|
|
|
return ret;
|
2008-05-12 19:20:46 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int tracing_release_pipe(struct inode *inode, struct file *file)
|
|
|
|
{
|
|
|
|
struct trace_iterator *iter = file->private_data;
|
2013-07-23 15:25:57 +00:00
|
|
|
struct trace_array *tr = inode->i_private;
|
2008-05-12 19:20:46 +00:00
|
|
|
|
tracing/core: introduce per cpu tracing files
2009-02-25 02:22:28 +00:00
|
|
|
mutex_lock(&trace_types_lock);
|
|
|
|
|
2020-06-30 03:45:56 +00:00
|
|
|
tr->trace_ref--;
|
2014-12-16 01:13:31 +00:00
|
|
|
|
2009-12-09 17:37:43 +00:00
|
|
|
if (iter->trace->pipe_close)
|
2009-12-07 14:06:24 +00:00
|
|
|
iter->trace->pipe_close(iter);
|
|
|
|
|
tracing/core: introduce per cpu tracing files
2009-02-25 02:22:28 +00:00
|
|
|
mutex_unlock(&trace_types_lock);
|
|
|
|
|
2008-12-31 23:42:23 +00:00
|
|
|
free_cpumask_var(iter->started);
|
tracing/core: make the read callbacks reentrants
2009-02-25 05:13:16 +00:00
|
|
|
mutex_destroy(&iter->mutex);
|
2008-05-12 19:20:46 +00:00
|
|
|
kfree(iter);
|
|
|
|
|
2013-07-02 03:34:22 +00:00
|
|
|
trace_array_put(tr);
|
|
|
|
|
2008-05-12 19:20:46 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-07-03 04:42:43 +00:00
|
|
|
static __poll_t
|
2013-02-28 14:17:16 +00:00
|
|
|
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
|
2008-05-12 19:20:49 +00:00
|
|
|
{
|
2015-09-30 13:42:05 +00:00
|
|
|
struct trace_array *tr = iter->tr;
|
|
|
|
|
2013-03-01 00:59:17 +00:00
|
|
|
/* Iterators are static, they should be filled or empty */
|
|
|
|
if (trace_buffer_iter(iter, iter->cpu_file))
|
2018-02-11 22:34:03 +00:00
|
|
|
return EPOLLIN | EPOLLRDNORM;
|
2008-05-12 19:20:49 +00:00
|
|
|
|
2015-09-30 13:42:05 +00:00
|
|
|
if (tr->trace_flags & TRACE_ITER_BLOCK)
|
2008-05-12 19:20:49 +00:00
|
|
|
/*
|
|
|
|
* Always select as readable when in blocking mode
|
|
|
|
*/
|
2018-02-11 22:34:03 +00:00
|
|
|
return EPOLLIN | EPOLLRDNORM;
|
2013-03-01 00:59:17 +00:00
|
|
|
else
|
2020-01-09 23:53:48 +00:00
|
|
|
return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
|
2013-03-01 00:59:17 +00:00
|
|
|
filp, poll_table);
|
2008-05-12 19:20:49 +00:00
|
|
|
}
|
|
|
|
|
2017-07-03 04:42:43 +00:00
|
|
|
static __poll_t
|
2013-02-28 14:17:16 +00:00
|
|
|
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
|
|
|
|
{
|
|
|
|
struct trace_iterator *iter = filp->private_data;
|
|
|
|
|
|
|
|
return trace_poll(iter, filp, poll_table);
|
2008-05-12 19:20:49 +00:00
|
|
|
}
|
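The poll semantics above can be observed from userspace: poll() on trace_pipe reports readable when entries are pending for the chosen cpu buffer (or immediately when the block option is set). A minimal sketch, with the tracefs path as an assumption:

/* Sketch: wait for data on trace_pipe with poll(2); the path is an
 * assumption and depends on where tracefs is mounted. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	struct pollfd pfd;
	ssize_t n;

	pfd.fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY | O_NONBLOCK);
	if (pfd.fd < 0)
		return 1;
	pfd.events = POLLIN;

	/* Returns when the ring buffer has entries ready to consume. */
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		n = read(pfd.fd, buf, sizeof(buf));
		if (n > 0)
			fwrite(buf, 1, n, stdout);
	}
	close(pfd.fd);
	return 0;
}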
|
|
|
|
2014-12-16 03:31:07 +00:00
|
|
|
/* Must be called with iter->mutex held. */
|
2009-02-09 06:15:55 +00:00
|
|
|
static int tracing_wait_pipe(struct file *filp)
|
2008-05-12 19:20:46 +00:00
|
|
|
{
|
|
|
|
struct trace_iterator *iter = filp->private_data;
|
2014-06-10 13:46:00 +00:00
|
|
|
int ret;
|
2008-05-12 19:20:46 +00:00
|
|
|
|
|
|
|
while (trace_empty(iter)) {
|
2008-05-12 19:20:58 +00:00
|
|
|
|
2008-05-12 19:21:01 +00:00
|
|
|
if ((filp->f_flags & O_NONBLOCK)) {
|
2009-02-09 06:15:55 +00:00
|
|
|
return -EAGAIN;
|
2008-05-12 19:21:01 +00:00
|
|
|
}
|
2008-05-12 19:20:58 +00:00
|
|
|
|
2008-05-12 19:20:46 +00:00
|
|
|
/*
|
2013-01-14 02:54:11 +00:00
|
|
|
* We block until we read something and tracing is disabled.
|
2008-05-12 19:20:46 +00:00
|
|
|
* We still block if tracing is disabled, but we have never
|
|
|
|
* read anything. This allows a user to cat this file, and
|
|
|
|
* then enable tracing. But after we have read something,
|
|
|
|
* we give an EOF when tracing is again disabled.
|
|
|
|
*
|
|
|
|
* iter->pos will be 0 if we haven't read anything.
|
|
|
|
*/
|
2017-09-17 10:23:48 +00:00
|
|
|
if (!tracer_tracing_is_on(iter->tr) && iter->pos)
|
2008-05-12 19:20:46 +00:00
|
|
|
break;
|
2014-04-29 20:07:28 +00:00
|
|
|
|
|
|
|
mutex_unlock(&iter->mutex);
|
|
|
|
|
2018-11-30 01:32:26 +00:00
|
|
|
ret = wait_on_pipe(iter, 0);
|
2014-04-29 20:07:28 +00:00
|
|
|
|
|
|
|
mutex_lock(&iter->mutex);
|
|
|
|
|
2014-06-10 13:46:00 +00:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
2008-05-12 19:20:46 +00:00
|
|
|
}
|
|
|
|
|
2009-02-09 06:15:55 +00:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Consumer reader.
|
|
|
|
*/
|
|
|
|
static ssize_t
|
|
|
|
tracing_read_pipe(struct file *filp, char __user *ubuf,
|
|
|
|
size_t cnt, loff_t *ppos)
|
|
|
|
{
|
|
|
|
struct trace_iterator *iter = filp->private_data;
|
|
|
|
ssize_t sret;
|
|
|
|
|
tracing/core: make the read callbacks reentrants
2009-02-25 05:13:16 +00:00
|
|
|
/*
|
|
|
|
* Avoid more than one consumer on a single file descriptor
|
|
|
|
* This is just a matter of traces coherency, the ring buffer itself
|
|
|
|
* is protected.
|
|
|
|
*/
|
|
|
|
mutex_lock(&iter->mutex);
|
2016-09-24 02:57:13 +00:00
|
|
|
|
|
|
|
/* return any leftover data */
|
|
|
|
sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
|
|
|
|
if (sret != -EBUSY)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
trace_seq_init(&iter->seq);
|
|
|
|
|
2009-02-09 06:15:55 +00:00
|
|
|
if (iter->trace->read) {
|
|
|
|
sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
|
|
|
|
if (sret)
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
waitagain:
|
|
|
|
sret = tracing_wait_pipe(filp);
|
|
|
|
if (sret <= 0)
|
|
|
|
goto out;
|
|
|
|
|
2008-05-12 19:20:46 +00:00
|
|
|
/* stop when tracing is finished */
|
2009-02-09 06:15:55 +00:00
|
|
|
if (trace_empty(iter)) {
|
|
|
|
sret = 0;
|
2008-05-12 19:21:01 +00:00
|
|
|
goto out;
|
2009-02-09 06:15:55 +00:00
|
|
|
}
|
2008-05-12 19:20:46 +00:00
|
|
|
|
|
|
|
if (cnt >= PAGE_SIZE)
|
|
|
|
cnt = PAGE_SIZE - 1;
|
|
|
|
|
2008-05-12 19:21:01 +00:00
|
|
|
/* reset all but tr, trace, and overruns */
|
|
|
|
memset(&iter->seq, 0,
|
|
|
|
sizeof(struct trace_iterator) -
|
|
|
|
offsetof(struct trace_iterator, seq));
|
2013-08-02 17:16:43 +00:00
|
|
|
cpumask_clear(iter->started);
|
2019-10-11 14:21:34 +00:00
|
|
|
trace_seq_init(&iter->seq);
|
2008-05-12 19:21:01 +00:00
|
|
|
iter->pos = -1;
|
2008-05-12 19:20:46 +00:00
|
|
|
|
2009-05-18 11:35:34 +00:00
|
|
|
trace_event_read_lock();
|
tracing: Consolidate protection of reader access to the ring buffer
2010-01-06 12:08:50 +00:00
|
|
|
trace_access_lock(iter->cpu_file);
|
2010-08-05 14:22:23 +00:00
|
|
|
while (trace_find_next_entry_inc(iter) != NULL) {
|
2008-09-29 18:18:34 +00:00
|
|
|
enum print_line_t ret;
|
2014-11-14 20:49:41 +00:00
|
|
|
int save_len = iter->seq.seq.len;
|
2008-05-12 19:20:48 +00:00
|
|
|
|
2008-05-12 19:20:47 +00:00
|
|
|
ret = print_trace_line(iter);
|
2008-09-29 18:18:34 +00:00
|
|
|
if (ret == TRACE_TYPE_PARTIAL_LINE) {
|
2008-05-12 19:20:48 +00:00
|
|
|
/* don't print partial lines */
|
2014-11-14 20:49:41 +00:00
|
|
|
iter->seq.seq.len = save_len;
|
2008-05-12 19:20:46 +00:00
|
|
|
break;
|
2008-05-12 19:20:48 +00:00
|
|
|
}
|
2009-02-06 17:30:44 +00:00
|
|
|
if (ret != TRACE_TYPE_NO_CONSUME)
|
|
|
|
trace_consume(iter);
|
2008-05-12 19:20:46 +00:00
|
|
|
|
2014-11-14 20:49:41 +00:00
|
|
|
if (trace_seq_used(&iter->seq) >= cnt)
|
2008-05-12 19:20:46 +00:00
|
|
|
break;
|
2011-03-25 11:05:18 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Setting the full flag means we reached the trace_seq buffer
|
|
|
|
* size and we should leave by partial output condition above.
|
|
|
|
* One of the trace_seq_* functions is not used properly.
|
|
|
|
*/
|
|
|
|
WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
|
|
|
|
iter->ent->type);
|
2008-05-12 19:20:46 +00:00
|
|
|
}
|
tracing: Consolidate protection of reader access to the ring buffer
2010-01-06 12:08:50 +00:00
|
|
|
trace_access_unlock(iter->cpu_file);
|
2009-05-18 11:35:34 +00:00
|
|
|
trace_event_read_unlock();
|
2008-05-12 19:20:46 +00:00
|
|
|
|
|
|
|
/* Now copy what we have to the user */
|
2008-05-12 19:21:02 +00:00
|
|
|
sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
|
2014-11-14 20:49:41 +00:00
|
|
|
if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
|
2009-03-02 19:04:40 +00:00
|
|
|
trace_seq_init(&iter->seq);
|
2008-09-29 18:23:48 +00:00
|
|
|
|
|
|
|
/*
|
2011-03-31 01:57:33 +00:00
|
|
|
* If there was nothing to send to user, in spite of consuming trace
|
2008-09-29 18:23:48 +00:00
|
|
|
* entries, go back to wait for more entries.
|
|
|
|
*/
|
2008-05-12 19:21:02 +00:00
|
|
|
if (sret == -EBUSY)
|
2008-09-29 18:23:48 +00:00
|
|
|
goto waitagain;
|
2008-05-12 19:20:46 +00:00
|
|
|
|
2008-05-12 19:21:01 +00:00
|
|
|
out:
|
tracing/core: make the read callbacks reentrants
2009-02-25 05:13:16 +00:00
|
|
|
mutex_unlock(&iter->mutex);
|
2008-05-12 19:21:01 +00:00
|
|
|
|
2008-05-12 19:21:02 +00:00
|
|
|
return sret;
|
2008-05-12 19:20:46 +00:00
|
|
|
}
|
|
|
|
|
2009-02-09 06:15:56 +00:00
|
|
|
static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
|
|
|
|
unsigned int idx)
|
|
|
|
{
|
|
|
|
__free_page(spd->pages[idx]);
|
|
|
|
}
|
|
|
|
|
2009-02-09 17:06:29 +00:00
|
|
|
static size_t
|
2009-02-11 01:51:30 +00:00
|
|
|
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
|
2009-02-09 17:06:29 +00:00
|
|
|
{
|
|
|
|
size_t count;
|
2014-11-17 18:12:22 +00:00
|
|
|
int save_len;
|
2009-02-09 17:06:29 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
/* Seq buffer is page-sized, exactly what we need. */
|
|
|
|
for (;;) {
|
2014-11-17 18:12:22 +00:00
|
|
|
save_len = iter->seq.seq.len;
|
2009-02-09 17:06:29 +00:00
|
|
|
ret = print_trace_line(iter);
|
2014-11-17 18:12:22 +00:00
|
|
|
|
|
|
|
if (trace_seq_has_overflowed(&iter->seq)) {
|
|
|
|
iter->seq.seq.len = save_len;
|
2009-02-09 17:06:29 +00:00
|
|
|
break;
|
|
|
|
}
|
2014-11-17 18:12:22 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* This should not be hit, because it should only
|
|
|
|
* be set if the iter->seq overflowed. But check it
|
|
|
|
* anyway to be safe.
|
|
|
|
*/
|
2009-02-09 17:06:29 +00:00
|
|
|
if (ret == TRACE_TYPE_PARTIAL_LINE) {
|
2014-11-17 18:12:22 +00:00
|
|
|
iter->seq.seq.len = save_len;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2014-11-14 20:49:41 +00:00
|
|
|
count = trace_seq_used(&iter->seq) - save_len;
|
2014-11-17 18:12:22 +00:00
|
|
|
if (rem < count) {
|
|
|
|
rem = 0;
|
|
|
|
iter->seq.seq.len = save_len;
|
2009-02-09 17:06:29 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2009-07-28 12:17:22 +00:00
|
|
|
if (ret != TRACE_TYPE_NO_CONSUME)
|
|
|
|
trace_consume(iter);
|
2009-02-09 17:06:29 +00:00
|
|
|
rem -= count;
|
2010-08-05 14:22:23 +00:00
|
|
|
if (!trace_find_next_entry_inc(iter)) {
|
2009-02-09 17:06:29 +00:00
|
|
|
rem = 0;
|
|
|
|
iter->ent = NULL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return rem;
|
|
|
|
}
|
|
|
|
|
2009-02-09 06:15:56 +00:00
|
|
|
static ssize_t tracing_splice_read_pipe(struct file *filp,
|
|
|
|
loff_t *ppos,
|
|
|
|
struct pipe_inode_info *pipe,
|
|
|
|
size_t len,
|
|
|
|
unsigned int flags)
|
|
|
|
{
|
2010-05-20 08:43:18 +00:00
|
|
|
struct page *pages_def[PIPE_DEF_BUFFERS];
|
|
|
|
struct partial_page partial_def[PIPE_DEF_BUFFERS];
|
2009-02-09 06:15:56 +00:00
|
|
|
struct trace_iterator *iter = filp->private_data;
|
|
|
|
struct splice_pipe_desc spd = {
|
2010-05-20 08:43:18 +00:00
|
|
|
.pages = pages_def,
|
|
|
|
.partial = partial_def,
|
2009-02-09 17:06:29 +00:00
|
|
|
.nr_pages = 0, /* This gets updated below. */
|
2012-06-12 13:24:40 +00:00
|
|
|
.nr_pages_max = PIPE_DEF_BUFFERS,
|
2020-05-20 15:58:13 +00:00
|
|
|
.ops = &default_pipe_buf_ops,
|
2009-02-09 17:06:29 +00:00
|
|
|
.spd_release = tracing_spd_release_pipe,
|
2009-02-09 06:15:56 +00:00
|
|
|
};
|
|
|
|
ssize_t ret;
|
2009-02-09 17:06:29 +00:00
|
|
|
size_t rem;
|
2009-02-09 06:15:56 +00:00
|
|
|
unsigned int i;
|
|
|
|
|
2010-05-20 08:43:18 +00:00
|
|
|
if (splice_grow_spd(pipe, &spd))
|
|
|
|
return -ENOMEM;
|
|
|
|
|
tracing/core: make the read callbacks reentrants
2009-02-25 05:13:16 +00:00
|
|
|
mutex_lock(&iter->mutex);
|
2009-02-09 06:15:56 +00:00
|
|
|
|
|
|
|
if (iter->trace->splice_read) {
|
|
|
|
ret = iter->trace->splice_read(iter, filp,
|
|
|
|
ppos, pipe, len, flags);
|
|
|
|
if (ret)
|
2009-02-09 17:06:29 +00:00
|
|
|
goto out_err;
|
2009-02-09 06:15:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ret = tracing_wait_pipe(filp);
|
|
|
|
if (ret <= 0)
|
2009-02-09 17:06:29 +00:00
|
|
|
goto out_err;
|
2009-02-09 06:15:56 +00:00
|
|
|
|
2010-08-05 14:22:23 +00:00
|
|
|
if (!iter->ent && !trace_find_next_entry_inc(iter)) {
|
2009-02-09 06:15:56 +00:00
|
|
|
ret = -EFAULT;
|
2009-02-09 17:06:29 +00:00
|
|
|
goto out_err;
|
2009-02-09 06:15:56 +00:00
|
|
|
}
|
|
|
|
|
2009-05-18 11:35:34 +00:00
|
|
|
trace_event_read_lock();
|
tracing: Consolidate protection of reader access to the ring buffer
2010-01-06 12:08:50 +00:00
|
|
|
trace_access_lock(iter->cpu_file);
|
2009-05-18 11:35:34 +00:00
|
|
|
|
2009-02-09 06:15:56 +00:00
|
|
|
/* Fill as many pages as possible. */
|
2014-04-11 16:01:03 +00:00
|
|
|
for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
|
2010-05-20 08:43:18 +00:00
|
|
|
spd.pages[i] = alloc_page(GFP_KERNEL);
|
|
|
|
if (!spd.pages[i])
|
2009-02-09 17:06:29 +00:00
|
|
|
break;
|
2009-02-09 06:15:56 +00:00
|
|
|
|
2009-02-11 01:51:30 +00:00
|
|
|
rem = tracing_fill_pipe_page(rem, iter);
|
2009-02-09 06:15:56 +00:00
|
|
|
|
|
|
|
/* Copy the data into the page, so we can start over. */
|
|
|
|
ret = trace_seq_to_buffer(&iter->seq,
|
2010-05-20 08:43:18 +00:00
|
|
|
page_address(spd.pages[i]),
|
2014-11-14 20:49:41 +00:00
|
|
|
trace_seq_used(&iter->seq));
|
2009-02-09 06:15:56 +00:00
|
|
|
if (ret < 0) {
|
2010-05-20 08:43:18 +00:00
|
|
|
__free_page(spd.pages[i]);
|
2009-02-09 06:15:56 +00:00
|
|
|
break;
|
|
|
|
}
|
2010-05-20 08:43:18 +00:00
|
|
|
spd.partial[i].offset = 0;
|
2014-11-14 20:49:41 +00:00
|
|
|
spd.partial[i].len = trace_seq_used(&iter->seq);
|
2009-02-09 06:15:56 +00:00
|
|
|
|
2009-03-02 19:04:40 +00:00
|
|
|
trace_seq_init(&iter->seq);
|
2009-02-09 06:15:56 +00:00
|
|
|
}
|
|
|
|
|
tracing: Consolidate protection of reader access to the ring buffer
2010-01-06 12:08:50 +00:00
|
|
|
trace_access_unlock(iter->cpu_file);
|
2009-05-18 11:35:34 +00:00
|
|
|
trace_event_read_unlock();
|
tracing/core: make the read callbacks reentrants
2009-02-25 05:13:16 +00:00
|
|
|
mutex_unlock(&iter->mutex);
|
2009-02-09 06:15:56 +00:00
|
|
|
|
|
|
|
spd.nr_pages = i;
|
|
|
|
|
2016-03-18 19:46:48 +00:00
|
|
|
if (i)
|
|
|
|
ret = splice_to_pipe(pipe, &spd);
|
|
|
|
else
|
|
|
|
ret = 0;
|
2010-05-20 08:43:18 +00:00
|
|
|
out:
|
2012-06-12 13:24:40 +00:00
|
|
|
splice_shrink_spd(&spd);
|
2010-05-20 08:43:18 +00:00
|
|
|
return ret;
|
2009-02-09 06:15:56 +00:00
|
|
|
|
2009-02-09 17:06:29 +00:00
|
|
|
out_err:
|
tracing/core: make the read callbacks reentrants
2009-02-25 05:13:16 +00:00
|
|
|
mutex_unlock(&iter->mutex);
|
2010-05-20 08:43:18 +00:00
|
|
|
goto out;
|
2009-02-09 06:15:56 +00:00
|
|
|
}
|
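The splice path above can be driven from userspace by splicing trace_pipe into a pipe and onward without an intermediate copy. A minimal sketch; the tracefs path is an assumption, and stdout should be redirected to a regular file since splicing to a tty may fail:

/* Sketch: move trace_pipe data to stdout via splice(2), exercising the
 * splice_read path above. Run with stdout redirected to a file. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int trace_fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
	int pipefd[2];
	ssize_t n;

	if (trace_fd < 0 || pipe(pipefd) < 0)
		return 1;

	/* Pull one batch of pages from the trace pipe... */
	n = splice(trace_fd, NULL, pipefd[1], NULL, 65536, SPLICE_F_MOVE);
	if (n > 0)
		/* ...and push them on to stdout without copying to userspace. */
		splice(pipefd[0], NULL, STDOUT_FILENO, NULL, n, SPLICE_F_MOVE);

	close(trace_fd);
	return 0;
}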
|
|
|
|
2008-05-12 19:20:59 +00:00
|
|
|
static ssize_t
|
|
|
|
tracing_entries_read(struct file *filp, char __user *ubuf,
|
|
|
|
size_t cnt, loff_t *ppos)
|
|
|
|
{
|
2013-07-23 15:26:06 +00:00
|
|
|
struct inode *inode = file_inode(filp);
|
|
|
|
struct trace_array *tr = inode->i_private;
|
|
|
|
int cpu = tracing_get_cpu(inode);
|
2012-02-02 20:00:41 +00:00
|
|
|
char buf[64];
|
|
|
|
int r = 0;
|
|
|
|
ssize_t ret;
|
2008-05-12 19:20:59 +00:00
|
|
|
|
2009-03-12 17:53:25 +00:00
|
|
|
mutex_lock(&trace_types_lock);
|
2012-02-02 20:00:41 +00:00
|
|
|
|
2013-07-23 15:26:06 +00:00
|
|
|
if (cpu == RING_BUFFER_ALL_CPUS) {
|
2012-02-02 20:00:41 +00:00
|
|
|
int cpu, buf_size_same;
|
|
|
|
unsigned long size;
|
|
|
|
|
|
|
|
size = 0;
|
|
|
|
buf_size_same = 1;
|
|
|
|
/* check if all cpu sizes are same */
|
|
|
|
for_each_tracing_cpu(cpu) {
|
|
|
|
/* fill in the size from first enabled cpu */
|
|
|
|
if (size == 0)
|
2020-01-09 23:53:48 +00:00
|
|
|
size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
|
|
|
|
if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
|
2012-02-02 20:00:41 +00:00
|
|
|
buf_size_same = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (buf_size_same) {
|
|
|
|
if (!ring_buffer_expanded)
|
|
|
|
r = sprintf(buf, "%lu (expanded: %lu)\n",
|
|
|
|
size >> 10,
|
|
|
|
trace_buf_size >> 10);
|
|
|
|
else
|
|
|
|
r = sprintf(buf, "%lu\n", size >> 10);
|
|
|
|
} else
|
|
|
|
r = sprintf(buf, "X\n");
|
|
|
|
} else
|
2020-01-09 23:53:48 +00:00
|
|
|
r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
|
2012-02-02 20:00:41 +00:00
|
|
|
|
2009-03-12 17:53:25 +00:00
|
|
|
mutex_unlock(&trace_types_lock);
|
|
|
|
|
2012-02-02 20:00:41 +00:00
|
|
|
ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
|
|
|
|
return ret;
|
2008-05-12 19:20:59 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
tracing_entries_write(struct file *filp, const char __user *ubuf,
|
|
|
|
size_t cnt, loff_t *ppos)
|
|
|
|
{
|
2013-07-23 15:26:06 +00:00
|
|
|
struct inode *inode = file_inode(filp);
|
|
|
|
struct trace_array *tr = inode->i_private;
|
2008-05-12 19:20:59 +00:00
|
|
|
unsigned long val;
|
2011-06-14 00:51:57 +00:00
|
|
|
int ret;
|
2008-05-12 19:20:59 +00:00
|
|
|
|
2011-06-07 19:58:27 +00:00
|
|
|
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
|
|
|
|
if (ret)
|
2008-05-12 19:21:00 +00:00
|
|
|
return ret;
|
2008-05-12 19:20:59 +00:00
|
|
|
|
|
|
|
/* must have at least 1 entry */
|
|
|
|
if (!val)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2008-11-13 05:09:35 +00:00
|
|
|
/* value is in KB */
|
|
|
|
val <<= 10;
|
2013-07-23 15:26:06 +00:00
|
|
|
ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
|
2011-06-14 00:51:57 +00:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
2008-05-12 19:20:59 +00:00
|
|
|
|
2009-10-23 23:36:16 +00:00
|
|
|
*ppos += cnt;
|
2008-05-12 19:20:59 +00:00
|
|
|
|
2011-06-14 00:51:57 +00:00
|
|
|
return cnt;
|
|
|
|
}
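The matching write side, again as a hedged sketch (paths assumed): the value written is interpreted as kilobytes, must be at least 1, and resizes either all CPUs at once or, through the per_cpu/cpuN/ copy of the file, a single CPU's buffer.

/* Hedged example: resize the trace buffers from userspace. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);

        if (fd < 0)
                return -1;
        if (write(fd, val, strlen(val)) < 0) {
                close(fd);
                return -1;
        }
        return close(fd);
}

int main(void)
{
        /* All CPUs at once (4 MB per CPU)... */
        if (write_str("/sys/kernel/tracing/buffer_size_kb", "4096"))
                perror("buffer_size_kb");
        /* ...or one CPU only (per_cpu path is an assumption). */
        if (write_str("/sys/kernel/tracing/per_cpu/cpu0/buffer_size_kb", "8192"))
                perror("per-cpu buffer_size_kb");
        return 0;
}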
|
2008-11-11 02:46:00 +00:00
|
|
|
|
2011-08-16 21:46:15 +00:00
|
|
|
static ssize_t
|
|
|
|
tracing_total_entries_read(struct file *filp, char __user *ubuf,
|
|
|
|
size_t cnt, loff_t *ppos)
|
|
|
|
{
|
|
|
|
struct trace_array *tr = filp->private_data;
|
|
|
|
char buf[64];
|
|
|
|
int r, cpu;
|
|
|
|
unsigned long size = 0, expanded_size = 0;
|
|
|
|
|
|
|
|
mutex_lock(&trace_types_lock);
|
|
|
|
for_each_tracing_cpu(cpu) {
|
2020-01-09 23:53:48 +00:00
|
|
|
size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
|
2011-08-16 21:46:15 +00:00
|
|
|
if (!ring_buffer_expanded)
|
|
|
|
expanded_size += trace_buf_size >> 10;
|
|
|
|
}
|
|
|
|
if (ring_buffer_expanded)
|
|
|
|
r = sprintf(buf, "%lu\n", size);
|
|
|
|
else
|
|
|
|
r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
|
|
|
|
mutex_unlock(&trace_types_lock);
|
|
|
|
|
|
|
|
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
|
|
|
|
}
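For completeness, a short hedged sketch of the read-only buffer_total_size_kb counterpart (file name and path assumed), which reports the sum over all CPUs and carries the same "(expanded: N)" hint until the buffer has been expanded.

#include <stdio.h>

/* Hedged sketch: prints e.g. "total: 11264" or "total: 28 (expanded: 5632)". */
static void show_total_size(void)
{
        char buf[64];
        FILE *f = fopen("/sys/kernel/tracing/buffer_total_size_kb", "r");

        if (f && fgets(buf, sizeof(buf), f))
                printf("total: %s", buf);
        if (f)
                fclose(f);
}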
|
|
|
|
|
2011-06-14 00:51:57 +00:00
|
|
|
static ssize_t
|
|
|
|
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
|
|
|
|
size_t cnt, loff_t *ppos)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* There is no need to read what the user has written; this function
|
|
|
|
* is just to make sure that there is no error when "echo" is used
|
|
|
|
*/
|
|
|
|
|
|
|
|
*ppos += cnt;
|
2008-05-12 19:20:59 +00:00
|
|
|
|
|
|
|
return cnt;
|
|
|
|
}
|
|
|
|
|
2011-06-14 00:51:57 +00:00
|
|
|
static int
|
|
|
|
tracing_free_buffer_release(struct inode *inode, struct file *filp)
|
|
|
|
{
|
2012-05-11 17:29:49 +00:00
|
|
|
struct trace_array *tr = inode->i_private;
|
|
|
|
|
2011-06-15 02:44:07 +00:00
|
|
|
/* disable tracing ? */
|
2015-09-30 13:42:05 +00:00
|
|
|
if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
|
2013-08-03 01:36:15 +00:00
|
|
|
tracer_tracing_off(tr);
|
2011-06-14 00:51:57 +00:00
|
|
|
/* resize the ring buffer to 0 */
|
2012-05-11 17:29:49 +00:00
|
|
|
tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
|
2011-06-14 00:51:57 +00:00
|
|
|
|
2013-07-02 03:34:22 +00:00
|
|
|
trace_array_put(tr);
|
|
|
|
|
2011-06-14 00:51:57 +00:00
|
|
|
return 0;
|
|
|
|
}
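A hedged sketch of how the write-only free_buffer file (name and path assumed) is meant to be used: the interesting work happens in the release handler above, so opening the file for write and closing it shrinks the ring buffer to zero, after turning tracing off first when TRACE_ITER_STOP_ON_FREE is set.

/* Hedged example: release the trace buffer memory from userspace. */
#include <fcntl.h>
#include <unistd.h>

static void free_trace_buffer(void)
{
        int fd = open("/sys/kernel/tracing/free_buffer", O_WRONLY);
        ssize_t ignored;

        if (fd < 0)
                return;
        ignored = write(fd, "1", 1);    /* content is ignored by the kernel */
        (void)ignored;
        close(fd);                      /* the resize to 0 happens here */
}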
|
|
|
|
|
2008-09-16 19:06:42 +00:00
|
|
|
static ssize_t
|
|
|
|
tracing_mark_write(struct file *filp, const char __user *ubuf,
|
|
|
|
size_t cnt, loff_t *fpos)
|
|
|
|
{
|
2013-07-01 22:31:24 +00:00
|
|
|
struct trace_array *tr = filp->private_data;
|
2011-09-22 15:50:27 +00:00
|
|
|
struct ring_buffer_event *event;
|
2018-05-09 18:17:48 +00:00
|
|
|
enum event_trigger_type tt = ETT_NONE;
|
2019-12-13 18:58:57 +00:00
|
|
|
struct trace_buffer *buffer;
|
2011-09-22 15:50:27 +00:00
|
|
|
struct print_entry *entry;
|
|
|
|
unsigned long irq_flags;
|
|
|
|
ssize_t written;
|
|
|
|
int size;
|
|
|
|
int len;
|
2016-07-06 19:25:08 +00:00
|
|
|
|
2016-12-08 17:40:18 +00:00
|
|
|
/* Used in tracing_mark_raw_write() as well */
|
tracing: Eliminate const char[] auto variables
Automatic const char[] variables cause unnecessary code
generation. For example, the this_mod variable leads to
3f04: 48 b8 5f 5f 74 68 69 73 5f 6d movabs $0x6d5f736968745f5f,%rax # __this_m
3f0e: 4c 8d 44 24 02 lea 0x2(%rsp),%r8
3f13: 48 8d 7c 24 10 lea 0x10(%rsp),%rdi
3f18: 48 89 44 24 02 mov %rax,0x2(%rsp)
3f1d: 4c 89 e9 mov %r13,%rcx
3f20: b8 65 00 00 00 mov $0x65,%eax # e
3f25: 48 c7 c2 00 00 00 00 mov $0x0,%rdx
3f28: R_X86_64_32S .rodata.str1.1+0x18d
3f2c: be 48 00 00 00 mov $0x48,%esi
3f31: c7 44 24 0a 6f 64 75 6c movl $0x6c75646f,0xa(%rsp) # odul
3f39: 66 89 44 24 0e mov %ax,0xe(%rsp)
i.e., the string gets built on the stack at runtime. Similar code can be
found for the other instances I'm replacing here. Putting the string
in .rodata reduces the combined .text+.rodata size and saves time and
stack space at runtime.
The simplest fix, and what I've done for the this_mod case, is to just
make the variable static.
However, for the "<faulted>" case where the same string is used twice,
that prevents the linker from merging those two literals, so instead use
a macro - that also keeps the two instances automatically in
sync (instead of only the compile-time strlen expression).
Finally, for the two runs of spaces, it turns out that the "build
these strings on the stack" is not the worst part of what gcc does -
it turns print_func_help_header_irq() into "if (tgid) { /*
print_event_info + five seq_printf calls */ } else { /* print
event_info + another five seq_printf */}". Taking inspiration from a
suggestion from Al Viro, use %.*s to make snprintf either stop after
the first two spaces or print the whole string. As a bonus, the
seq_printfs now fit on single lines (at least, they are not longer
than the existing ones in the function just above), making it easier
to see that the ascii art lines up.
x86-64 defconfig + CONFIG_FUNCTION_TRACER:
$ scripts/stackdelta /tmp/stackusage.{0,1}
./kernel/trace/ftrace.c ftrace_mod_callback 152 136 -16
./kernel/trace/trace.c trace_default_header 56 32 -24
./kernel/trace/trace.c tracing_mark_raw_write 96 72 -24
./kernel/trace/trace.c tracing_mark_write 104 80 -24
bloat-o-meter
add/remove: 1/0 grow/shrink: 0/4 up/down: 14/-375 (-361)
Function old new delta
this_mod - 14 +14
ftrace_mod_callback 577 542 -35
tracing_mark_raw_write 444 374 -70
tracing_mark_write 616 540 -76
trace_default_header 600 406 -194
Link: http://lkml.kernel.org/r/20190320081757.6037-1-linux@rasmusvillemoes.dk
Signed-off-by: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
2019-03-20 08:17:57 +00:00
|
|
|
#define FAULTED_STR "<faulted>"
|
|
|
|
#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
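/*
 * Illustration of the change described in the changelog above (not
 * part of this function): an automatic array such as
 *
 *         const char faulted[] = "<faulted>";
 *
 * makes the compiler rebuild the string on the stack at every call,
 * whereas a static array, or the FAULTED_STR macro used here, keeps
 * a single copy in .rodata that the linker can also merge between
 * tracing_mark_write() and tracing_mark_raw_write().
 */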
|
2008-09-16 19:06:42 +00:00
|
|
|
|
2008-11-08 03:36:02 +00:00
|
|
|
if (tracing_disabled)
|
2008-09-16 19:06:42 +00:00
|
|
|
return -EINVAL;
|
|
|
|
|
2015-09-30 13:42:05 +00:00
|
|
|
if (!(tr->trace_flags & TRACE_ITER_MARKERS))
|
2012-09-08 01:12:19 +00:00
|
|
|
return -EINVAL;
|
|
|
|
|
2008-09-16 19:06:42 +00:00
|
|
|
if (cnt > TRACE_BUF_SIZE)
|
|
|
|
cnt = TRACE_BUF_SIZE;
|
|
|
|
|
2011-09-22 15:50:27 +00:00
|
|
|
BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
|
2008-09-16 19:06:42 +00:00
|
|
|
|
2011-09-22 15:50:27 +00:00
|
|
|
local_save_flags(irq_flags);
|
2016-12-08 17:40:18 +00:00
|
|
|
size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
|
2011-09-22 15:50:27 +00:00
|
|
|
|
2016-12-08 17:40:18 +00:00
|
|
|
/* If less than "<faulted>", then make sure we can still add that */
|
|
|
|
if (cnt < FAULTED_SIZE)
|
|
|
|
size += FAULTED_SIZE - cnt;
|
2011-09-22 15:50:27 +00:00
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
buffer = tr->array_buffer.buffer;
|
2016-11-23 16:29:58 +00:00
|
|
|
event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
|
|
|
|
irq_flags, preempt_count());
|
2016-12-08 17:40:18 +00:00
|
|
|
if (unlikely(!event))
|
2011-09-22 15:50:27 +00:00
|
|
|
/* Ring buffer disabled, return as if not open for write */
|
2016-12-08 17:40:18 +00:00
|
|
|
return -EBADF;
|
2011-09-22 15:50:27 +00:00
|
|
|
|
|
|
|
entry = ring_buffer_event_data(event);
|
|
|
|
entry->ip = _THIS_IP_;
|
|
|
|
|
2016-12-08 17:40:18 +00:00
|
|
|
len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
|
|
|
|
if (len) {
|
tracing: Eliminate const char[] auto variables (changelog above)
2019-03-20 08:17:57 +00:00
|
|
|
memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
|
2016-12-08 17:40:18 +00:00
|
|
|
cnt = FAULTED_SIZE;
|
|
|
|
written = -EFAULT;
|
2009-11-16 19:56:13 +00:00
|
|
|
} else
|
2016-12-08 17:40:18 +00:00
|
|
|
written = cnt;
|
|
|
|
len = cnt;
|
2008-09-16 19:06:42 +00:00
|
|
|
|
2018-05-09 18:17:48 +00:00
|
|
|
if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
|
|
|
|
/* do not add \n before testing triggers, but add \0 */
|
|
|
|
entry->buf[cnt] = '\0';
|
|
|
|
tt = event_triggers_call(tr->trace_marker_file, entry, event);
|
|
|
|
}
|
|
|
|
|
2011-09-22 15:50:27 +00:00
|
|
|
if (entry->buf[cnt - 1] != '\n') {
|
|
|
|
entry->buf[cnt] = '\n';
|
|
|
|
entry->buf[cnt + 1] = '\0';
|
|
|
|
} else
|
|
|
|
entry->buf[cnt] = '\0';
|
|
|
|
|
2012-10-11 16:14:25 +00:00
|
|
|
__buffer_unlock_commit(buffer, event);
|
2008-09-16 19:06:42 +00:00
|
|
|
|
2018-05-09 18:17:48 +00:00
|
|
|
if (tt)
|
|
|
|
event_triggers_post_call(tr->trace_marker_file, tt);
|
|
|
|
|
2016-12-08 17:40:18 +00:00
|
|
|
if (written > 0)
|
|
|
|
*fpos += written;
|
2008-09-16 19:06:42 +00:00
|
|
|
|
2016-07-06 19:25:08 +00:00
|
|
|
return written;
|
|
|
|
}
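A hedged userspace example for the trace_marker interface implemented above (file name and mount point assumed): the kernel appends a terminating newline if the message lacks one, input is truncated to TRACE_BUF_SIZE, and, per the changelog further down, the return value is the number of bytes actually consumed, so it should be checked like any other write().

/* Hedged example: emit a message into the trace via trace_marker. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *msg = "hello from userspace";   /* no trailing '\n' needed */
        int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
        ssize_t n;

        if (fd < 0) {
                perror("open");
                return 1;
        }
        n = write(fd, msg, strlen(msg));
        if (n < 0)
                perror("write");
        else
                printf("wrote %zd of %zu bytes\n", n, strlen(msg));
        close(fd);
        return 0;
}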
|
|
|
|
|
|
|
|
/* Limit it for now to 3K (including tag) */
|
|
|
|
#define RAW_DATA_MAX_SIZE (1024*3)
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
|
|
|
|
size_t cnt, loff_t *fpos)
|
|
|
|
{
|
|
|
|
struct trace_array *tr = filp->private_data;
|
|
|
|
struct ring_buffer_event *event;
|
2019-12-13 18:58:57 +00:00
|
|
|
struct trace_buffer *buffer;
|
2016-07-06 19:25:08 +00:00
|
|
|
struct raw_data_entry *entry;
|
|
|
|
unsigned long irq_flags;
|
|
|
|
ssize_t written;
|
|
|
|
int size;
|
|
|
|
int len;
|
|
|
|
|
2016-12-08 17:40:18 +00:00
|
|
|
#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
|
|
|
|
|
2016-07-06 19:25:08 +00:00
|
|
|
if (tracing_disabled)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (!(tr->trace_flags & TRACE_ITER_MARKERS))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/* The marker must at least have a tag id */
|
|
|
|
if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (cnt > TRACE_BUF_SIZE)
|
|
|
|
cnt = TRACE_BUF_SIZE;
|
|
|
|
|
|
|
|
BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
|
|
|
|
|
|
|
|
local_save_flags(irq_flags);
|
|
|
|
size = sizeof(*entry) + cnt;
|
2016-12-08 17:40:18 +00:00
|
|
|
if (cnt < FAULT_SIZE_ID)
|
|
|
|
size += FAULT_SIZE_ID - cnt;
|
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
buffer = tr->array_buffer.buffer;
|
2016-11-23 16:29:58 +00:00
|
|
|
event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
|
|
|
|
irq_flags, preempt_count());
|
2016-12-08 17:40:18 +00:00
|
|
|
if (!event)
|
2016-07-06 19:25:08 +00:00
|
|
|
/* Ring buffer disabled, return as if not open for write */
|
2016-12-08 17:40:18 +00:00
|
|
|
return -EBADF;
|
2016-07-06 19:25:08 +00:00
|
|
|
|
|
|
|
entry = ring_buffer_event_data(event);
|
|
|
|
|
2016-12-08 17:40:18 +00:00
|
|
|
len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
|
|
|
|
if (len) {
|
|
|
|
entry->id = -1;
|
tracing: Eliminate const char[] auto variables (changelog above)
2019-03-20 08:17:57 +00:00
|
|
|
memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
|
2016-12-08 17:40:18 +00:00
|
|
|
written = -EFAULT;
|
2016-07-06 19:25:08 +00:00
|
|
|
} else
|
2016-12-08 17:40:18 +00:00
|
|
|
written = cnt;
|
2016-07-06 19:25:08 +00:00
|
|
|
|
|
|
|
__buffer_unlock_commit(buffer, event);
|
|
|
|
|
2016-12-08 17:40:18 +00:00
|
|
|
if (written > 0)
|
|
|
|
*fpos += written;
|
tracing: Sanitize value returned from write(trace_marker, "...", len)
When userspace code writes a non-newline-terminated string to the trace_marker
file, the write handler appends a newline and returns the number of bytes written
to the trace buffer, so
write(fd, "abc", 3) will return 4
That's unexpected and unfortunately it confuses glibc's fprintf function.
Example:
int main() {
fprintf(stderr, "abc");
return 0;
}
$ gcc test.c -o test
$ echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
$ ./test 2>/sys/kernel/debug/tracing/trace_marker
results in an infinite loop:
write(fd, "abc", 3) = 4
write(fd, "", 1) = 0
write(fd, "", 1) = 0
write(fd, "", 1) = 0
write(fd, "", 1) = 0
write(fd, "", 1) = 0
write(fd, "", 1) = 0
write(fd, "", 1) = 0
(...)
...and kernel trace buffer full of empty markers.
Fix it by sanitizing the write return value.
Signed-off-by: Marcin Slusarz <marcin.slusarz@gmail.com>
LKML-Reference: <20100727231801.GB2826@joi.lan>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2010-07-27 23:18:01 +00:00
|
|
|
|
|
|
|
return written;
|
2008-09-16 19:06:42 +00:00
|
|
|
}
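The raw variant takes binary records; a hedged sketch (file name and path assumed): the first sizeof(unsigned int) bytes are the tag id copied into entry->id, the rest is opaque payload, and anything over RAW_DATA_MAX_SIZE (3 KB including the tag) is rejected.

/* Hedged example: write a tagged binary record to trace_marker_raw. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        unsigned char payload[8] = { 0xde, 0xad, 0xbe, 0xef, 1, 2, 3, 4 };
        unsigned char rec[sizeof(unsigned int) + sizeof(payload)];
        unsigned int tag = 42;                  /* illustrative tag id */
        int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        memcpy(rec, &tag, sizeof(tag));
        memcpy(rec + sizeof(tag), payload, sizeof(payload));
        if (write(fd, rec, sizeof(rec)) < 0)
                perror("write");
        close(fd);
        return 0;
}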
|
|
|
|
|
2009-12-08 03:16:11 +00:00
|
|
|
static int tracing_clock_show(struct seq_file *m, void *v)
|
2009-08-25 08:12:56 +00:00
|
|
|
{
|
2012-05-11 17:29:49 +00:00
|
|
|
struct trace_array *tr = m->private;
|
2009-08-25 08:12:56 +00:00
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
|
2009-12-08 03:16:11 +00:00
|
|
|
seq_printf(m,
|
2009-08-25 08:12:56 +00:00
|
|
|
"%s%s%s%s", i ? " " : "",
|
2012-05-11 17:29:49 +00:00
|
|
|
i == tr->clock_id ? "[" : "", trace_clocks[i].name,
|
|
|
|
i == tr->clock_id ? "]" : "");
|
2009-12-08 03:16:11 +00:00
|
|
|
seq_putc(m, '\n');
|
2009-08-25 08:12:56 +00:00
|
|
|
|
2009-12-08 03:16:11 +00:00
|
|
|
return 0;
|
2009-08-25 08:12:56 +00:00
|
|
|
}
|
|
|
|
|
2018-01-16 02:52:07 +00:00
|
|
|
int tracing_set_clock(struct trace_array *tr, const char *clockstr)
|
2009-08-25 08:12:56 +00:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
|
|
|
|
if (strcmp(trace_clocks[i].name, clockstr) == 0)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (i == ARRAY_SIZE(trace_clocks))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
mutex_lock(&trace_types_lock);
|
|
|
|
|
2012-05-11 17:29:49 +00:00
|
|
|
tr->clock_id = i;
|
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
|
2009-08-25 08:12:56 +00:00
|
|
|
|
2012-10-11 23:27:52 +00:00
|
|
|
/*
|
|
|
|
* New clock may not be consistent with the previous clock.
|
|
|
|
* Reset the buffer so that it doesn't have incomparable timestamps.
|
|
|
|
*/
|
2020-01-09 23:53:48 +00:00
|
|
|
tracing_reset_online_cpus(&tr->array_buffer);
|
tracing: Consolidate max_tr into main trace_array structure
Currently, the way the latency tracers and snapshot feature works
is to have a separate trace_array called "max_tr" that holds the
snapshot buffer. For latency tracers, this snapshot buffer is used
to swap the running buffer with this buffer to save the current max
latency.
The only items needed for the max_tr are really just a copy of the buffer
itself, the per_cpu data pointers, the time_start timestamp that states
when the max latency was triggered, and the cpu that the max latency
was triggered on. All other fields in trace_array are unused by the
max_tr, making the max_tr mostly bloat.
This change removes the max_tr completely, and adds a new structure
called trace_buffer, that holds the buffer pointer, the per_cpu data
pointers, the time_start timestamp, and the cpu where the latency occurred.
The trace_array now has two trace_buffers, one for the normal trace and
one for the max trace or snapshot. By doing this, not only do we remove
the bloat from the max_tr, but trace instances can now use their own
snapshot feature instead of only the top-level global_trace having the
snapshot feature and latency tracers to itself.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2013-03-05 14:24:35 +00:00
|
|
|
|
|
|
|
#ifdef CONFIG_TRACER_MAX_TRACE
|
2017-09-05 21:57:19 +00:00
|
|
|
if (tr->max_buffer.buffer)
|
tracing: Consolidate max_tr into main trace_array structure (changelog above)
2013-03-05 14:24:35 +00:00
|
|
|
ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
|
2013-08-03 01:36:16 +00:00
|
|
|
tracing_reset_online_cpus(&tr->max_buffer);
|
tracing: Consolidate max_tr into main trace_array structure (changelog above)
2013-03-05 14:24:35 +00:00
|
|
|
#endif
|
2012-10-11 23:27:52 +00:00
|
|
|
|
2009-08-25 08:12:56 +00:00
|
|
|
mutex_unlock(&trace_types_lock);
|
|
|
|
|
2014-02-11 04:38:46 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
|
|
|
|
size_t cnt, loff_t *fpos)
|
|
|
|
{
|
|
|
|
struct seq_file *m = filp->private_data;
|
|
|
|
struct trace_array *tr = m->private;
|
|
|
|
char buf[64];
|
|
|
|
const char *clockstr;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (cnt >= sizeof(buf))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2016-04-18 07:23:29 +00:00
|
|
|
if (copy_from_user(buf, ubuf, cnt))
|
2014-02-11 04:38:46 +00:00
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
buf[cnt] = 0;
|
|
|
|
|
|
|
|
clockstr = strstrip(buf);
|
|
|
|
|
|
|
|
ret = tracing_set_clock(tr, clockstr);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2009-08-25 08:12:56 +00:00
|
|
|
*fpos += cnt;
|
|
|
|
|
|
|
|
return cnt;
|
|
|
|
}
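A hedged userspace example for the trace_clock file handled above (name and path assumed): reading lists the available clocks with the current one in square brackets, and writing one of the listed names switches clocks, which also resets the buffer since old and new timestamps would not be comparable.

/* Hedged example: show the available trace clocks, then select "global". */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[256];
        ssize_t n;
        int fd = open("/sys/kernel/tracing/trace_clock", O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
                buf[n] = '\0';
                printf("%s", buf);      /* e.g. "[local] global counter ..." */
        }
        close(fd);

        fd = open("/sys/kernel/tracing/trace_clock", O_WRONLY);
        if (fd >= 0) {
                if (write(fd, "global", 6) < 0)  /* switching resets the buffer */
                        perror("write");
                close(fd);
        }
        return 0;
}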
|
|
|
|
|
2009-12-08 03:16:11 +00:00
|
|
|
static int tracing_clock_open(struct inode *inode, struct file *file)
|
|
|
|
{
|
2013-07-02 03:34:22 +00:00
|
|
|
struct trace_array *tr = inode->i_private;
|
|
|
|
int ret;
|
|
|
|
|
tracing: Add tracing_check_open_get_tr()
Currently, most files in the tracefs directory test if tracing_disabled is
set. If so, they should return -ENODEV. tracing_disabled is set when
tracing is found to be broken. Originally it was set in case the ring
buffer was found to be corrupted, and we wanted to prevent reading it from
crashing the kernel. But it's also set if a tracing selftest fails on
boot. It's a one-way switch. That is, once it is triggered, tracing is
disabled until reboot.
As most tracefs files can also be used by instances in the tracefs
directory, they need to be handled carefully. Each instance has a trace_array
associated with it, and when the instance is removed, the trace_array is
freed. But if an instance is opened with a reference to the trace_array,
then it requires looking up the trace_array to get its ref counter (as there
could be a race with it being deleted and the open itself). Once it is
found, a reference is added to prevent the instance from being removed (and
the trace_array associated with it freed).
Combine the two checks (tracing_disabled and trace_array_get()) into a
single helper function. This will also make it easier to add lockdown to
tracefs later.
Link: http://lkml.kernel.org/r/20191011135458.7399da44@gandalf.local.home
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
2019-10-11 21:39:57 +00:00
|
|
|
ret = tracing_check_open_get_tr(tr);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2013-07-02 03:34:22 +00:00
|
|
|
|
|
|
|
ret = single_open(file, tracing_clock_show, inode->i_private);
|
|
|
|
if (ret < 0)
|
|
|
|
trace_array_put(tr);
|
|
|
|
|
|
|
|
return ret;
|
2009-12-08 03:16:11 +00:00
|
|
|
}
|
|
|
|
|
2018-01-16 02:51:41 +00:00
|
|
|
static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
|
|
|
|
{
|
|
|
|
struct trace_array *tr = m->private;
|
|
|
|
|
|
|
|
mutex_lock(&trace_types_lock);
|
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
|
2018-01-16 02:51:41 +00:00
|
|
|
seq_puts(m, "delta [absolute]\n");
|
|
|
|
else
|
|
|
|
seq_puts(m, "[delta] absolute\n");
|
|
|
|
|
|
|
|
mutex_unlock(&trace_types_lock);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
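Read from userspace, this shows up as one of two lines with the active mode bracketed; a hedged illustration (file name timestamp_mode and tracefs path assumed):

/*
 * [delta] absolute      <- relative (delta) timestamps, the default
 * delta [absolute]      <- absolute timestamps currently selected
 */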
|
|
|
|
|
|
|
|
static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
|
|
|
|
{
|
|
|
|
struct trace_array *tr = inode->i_private;
|
|
|
|
int ret;
|
|
|
|
|
tracing: Add tracing_check_open_get_tr() (changelog above)
2019-10-11 21:39:57 +00:00
|
|
|
ret = tracing_check_open_get_tr(tr);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2018-01-16 02:51:41 +00:00
|
|
|
|
|
|
|
ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
|
|
|
|
if (ret < 0)
|
|
|
|
trace_array_put(tr);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-01-16 02:51:39 +00:00
|
|
|
int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
mutex_lock(&trace_types_lock);
|
|
|
|
|
|
|
|
if (abs && tr->time_stamp_abs_ref++)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (!abs) {
|
|
|
|
if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (--tr->time_stamp_abs_ref)
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs);
|
2018-01-16 02:51:39 +00:00
|
|
|
|
|
|
|
#ifdef CONFIG_TRACER_MAX_TRACE
|
|
|
|
if (tr->max_buffer.buffer)
|
|
|
|
ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
|
|
|
|
#endif
|
|
|
|
out:
|
|
|
|
mutex_unlock(&trace_types_lock);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2013-03-05 21:18:16 +00:00
|
|
|
struct ftrace_buffer_info {
|
|
|
|
struct trace_iterator iter;
|
|
|
|
void *spare;
|
2017-05-01 13:35:09 +00:00
|
|
|
unsigned int spare_cpu;
|
2013-03-05 21:18:16 +00:00
|
|
|
unsigned int read;
|
|
|
|
};
|
|
|
|
|
2012-12-26 02:53:00 +00:00
|
|
|
#ifdef CONFIG_TRACER_SNAPSHOT
|
|
|
|
static int tracing_snapshot_open(struct inode *inode, struct file *file)
|
|
|
|
{
|
2013-07-23 15:26:10 +00:00
|
|
|
struct trace_array *tr = inode->i_private;
|
2012-12-26 02:53:00 +00:00
|
|
|
struct trace_iterator *iter;
|
2012-05-11 17:29:49 +00:00
|
|
|
struct seq_file *m;
|
tracing: Add tracing_check_open_get_tr() (changelog above)
2019-10-11 21:39:57 +00:00
|
|
|
int ret;
|
2012-12-26 02:53:00 +00:00
|
|
|
|
tracing: Add tracing_check_open_get_tr() (changelog above)
2019-10-11 21:39:57 +00:00
|
|
|
ret = tracing_check_open_get_tr(tr);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2013-07-02 02:50:29 +00:00
|
|
|
|
2012-12-26 02:53:00 +00:00
|
|
|
if (file->f_mode & FMODE_READ) {
|
2013-07-23 15:26:10 +00:00
|
|
|
iter = __tracing_open(inode, file, true);
|
2012-12-26 02:53:00 +00:00
|
|
|
if (IS_ERR(iter))
|
|
|
|
ret = PTR_ERR(iter);
|
2012-05-11 17:29:49 +00:00
|
|
|
} else {
|
|
|
|
/* Writes still need the seq_file to hold the private data */
|
2013-07-18 18:18:44 +00:00
|
|
|
ret = -ENOMEM;
|
2012-05-11 17:29:49 +00:00
|
|
|
m = kzalloc(sizeof(*m), GFP_KERNEL);
|
|
|
|
if (!m)
|
2013-07-18 18:18:44 +00:00
|
|
|
goto out;
|
2012-05-11 17:29:49 +00:00
|
|
|
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
|
|
|
|
if (!iter) {
|
|
|
|
kfree(m);
|
2013-07-18 18:18:44 +00:00
|
|
|
goto out;
|
2012-05-11 17:29:49 +00:00
|
|
|
}
|
2013-07-18 18:18:44 +00:00
|
|
|
ret = 0;
|
|
|
|
|
2013-07-02 02:50:29 +00:00
|
|
|
iter->tr = tr;
|
2020-01-09 23:53:48 +00:00
|
|
|
iter->array_buffer = &tr->max_buffer;
|
2013-07-23 15:26:10 +00:00
|
|
|
iter->cpu_file = tracing_get_cpu(inode);
|
2012-05-11 17:29:49 +00:00
|
|
|
m->private = iter;
|
|
|
|
file->private_data = m;
|
2012-12-26 02:53:00 +00:00
|
|
|
}
|
2013-07-18 18:18:44 +00:00
|
|
|
out:
|
2013-07-02 02:50:29 +00:00
|
|
|
if (ret < 0)
|
|
|
|
trace_array_put(tr);
|
|
|
|
|
2012-12-26 02:53:00 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
|
|
|
|
loff_t *ppos)
|
|
|
|
{
|
2012-05-11 17:29:49 +00:00
|
|
|
struct seq_file *m = filp->private_data;
|
|
|
|
struct trace_iterator *iter = m->private;
|
|
|
|
struct trace_array *tr = iter->tr;
|
2012-12-26 02:53:00 +00:00
|
|
|
unsigned long val;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = tracing_update_buffers();
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
mutex_lock(&trace_types_lock);
|
|
|
|
|
2012-05-11 17:29:49 +00:00
|
|
|
if (tr->current_trace->use_max_tr) {
|
2012-12-26 02:53:00 +00:00
|
|
|
ret = -EBUSY;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
tracing: Add conditional snapshot
Currently, tracing snapshots are context-free - they capture the ring
buffer contents at the time the tracing_snapshot() function was
invoked, and nothing else. Additionally, they're always taken
unconditionally - the calling code can decide whether or not to take a
snapshot, but the data used to make that decision is kept separately
from the snapshot itself.
This change adds the ability to associate with each trace instance
some user data, along with an 'update' function that can use that data
to determine whether or not to actually take a snapshot. The update
function can then update that data along with any other state (as part
of the data presumably), if warranted.
Because snapshots are 'global' per-instance, only one user can enable
and use a conditional snapshot for any given trace instance. To
enable a conditional snapshot (see details in the function and data
structure comments), the user calls tracing_snapshot_cond_enable().
Similarly, to disable a conditional snapshot and free it up for other
users, tracing_snapshot_cond_disable() should be called.
To actually initiate a conditional snapshot, tracing_snapshot_cond()
should be called. tracing_snapshot_cond() will invoke the update()
callback, allowing the user to decide whether or not to actually take
the snapshot and update the user-defined data associated with the
snapshot. If the callback returns 'true', tracing_snapshot_cond()
will then actually take the snapshot and return.
This scheme allows for flexibility in snapshot implementations - for
example, by implementing slightly different update() callbacks,
snapshots can be taken in situations where the user is only interested
in taking a snapshot when a new maximum is hit versus when a value
changes in any way at all. Future patches will demonstrate both
cases.
Link: http://lkml.kernel.org/r/1bea07828d5fd6864a585f83b1eed47ce097eb45.1550100284.git.tom.zanussi@linux.intel.com
Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
2019-02-13 23:42:45 +00:00
|
|
|
arch_spin_lock(&tr->max_lock);
|
|
|
|
if (tr->cond_snapshot)
|
|
|
|
ret = -EBUSY;
|
|
|
|
arch_spin_unlock(&tr->max_lock);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
2012-12-26 02:53:00 +00:00
|
|
|
switch (val) {
|
|
|
|
case 0:
|
2013-03-05 19:35:11 +00:00
|
|
|
if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
break;
|
2012-12-26 02:53:00 +00:00
|
|
|
}
|
2013-03-12 15:17:54 +00:00
|
|
|
if (tr->allocated_snapshot)
|
|
|
|
free_snapshot(tr);
|
2012-12-26 02:53:00 +00:00
|
|
|
break;
|
|
|
|
case 1:
|
2013-03-05 19:35:11 +00:00
|
|
|
/* Only allow per-cpu swap if the ring buffer supports it */
|
|
|
|
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
|
|
|
|
if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
#endif
|
2019-06-25 01:29:10 +00:00
|
|
|
if (tr->allocated_snapshot)
|
|
|
|
ret = resize_buffer_duplicate_size(&tr->max_buffer,
|
2020-01-09 23:53:48 +00:00
|
|
|
&tr->array_buffer, iter->cpu_file);
|
2019-06-25 01:29:10 +00:00
|
|
|
else
|
2018-05-28 14:56:36 +00:00
|
|
|
ret = tracing_alloc_snapshot_instance(tr);
|
2019-06-25 01:29:10 +00:00
|
|
|
if (ret < 0)
|
|
|
|
break;
|
2012-12-26 02:53:00 +00:00
|
|
|
local_irq_disable();
|
|
|
|
/* Now, we're going to swap */
|
2013-03-05 19:35:11 +00:00
|
|
|
if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
|
tracing: Add conditional snapshot (changelog above)
2019-02-13 23:42:45 +00:00
|
|
|
update_max_tr(tr, current, smp_processor_id(), NULL);
|
2013-03-05 19:35:11 +00:00
|
|
|
else
|
2013-03-06 02:23:55 +00:00
|
|
|
update_max_tr_single(tr, current, iter->cpu_file);
|
2012-12-26 02:53:00 +00:00
|
|
|
local_irq_enable();
|
|
|
|
break;
|
|
|
|
default:
|
2013-03-05 23:25:02 +00:00
|
|
|
if (tr->allocated_snapshot) {
|
2013-03-05 19:35:11 +00:00
|
|
|
if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
|
|
|
|
tracing_reset_online_cpus(&tr->max_buffer);
|
|
|
|
else
|
2019-08-13 16:14:35 +00:00
|
|
|
tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
|
2013-03-05 19:35:11 +00:00
|
|
|
}
|
2012-12-26 02:53:00 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ret >= 0) {
|
|
|
|
*ppos += cnt;
|
|
|
|
ret = cnt;
|
|
|
|
}
|
|
|
|
out:
|
|
|
|
mutex_unlock(&trace_types_lock);
|
|
|
|
return ret;
|
|
|
|
}
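A hedged userspace sketch of the snapshot file semantics implemented above (path assumed, CONFIG_TRACER_SNAPSHOT=y): writing 1 allocates the snapshot buffer if needed and swaps it with the live buffer, 0 frees it again, and any other value only clears the snapshot buffer's contents; the per-CPU snapshot files allow the swap only with CONFIG_RING_BUFFER_ALLOW_SWAP.

/* Hedged example: capture a snapshot, then free it again. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int echo_to(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);
        ssize_t n;

        if (fd < 0)
                return -1;
        n = write(fd, val, strlen(val));
        close(fd);
        return n < 0 ? -1 : 0;
}

int main(void)
{
        if (echo_to("/sys/kernel/tracing/snapshot", "1"))  /* swap in a snapshot */
                return 1;
        /* ... read /sys/kernel/tracing/snapshot here to inspect it ... */
        echo_to("/sys/kernel/tracing/snapshot", "0");       /* free it again */
        return 0;
}

The conditional-snapshot interface described in the changelog earlier is kernel-internal; the sketch below only illustrates the shape of its use, and the callback prototype and call sequence are assumptions inferred from that changelog, not copied from a header.

/* Kernel-side sketch; my_cond_data and my_update() are illustrative only. */
struct my_cond_data {
        u64 hits;
};

static bool my_update(struct trace_array *tr, void *cond_data)
{
        struct my_cond_data *d = cond_data;

        /* Return true when this event is worth snapshotting. */
        return (++d->hits % 1000) == 0;         /* placeholder condition */
}

/* enable once:  tracing_snapshot_cond_enable(tr, &data, my_update);   */
/* on each hit:  tracing_snapshot_cond(tr, &data); snapshots if the    */
/*               update callback returned true                         */
/* when done:    tracing_snapshot_cond_disable(tr);                    */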
|
2012-05-11 17:29:49 +00:00
|
|
|
|
|
|
|
static int tracing_snapshot_release(struct inode *inode, struct file *file)
|
|
|
|
{
|
|
|
|
struct seq_file *m = file->private_data;
|
2013-07-02 02:50:29 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = tracing_release(inode, file);
|
2012-05-11 17:29:49 +00:00
|
|
|
|
|
|
|
if (file->f_mode & FMODE_READ)
|
2013-07-02 02:50:29 +00:00
|
|
|
return ret;
|
2012-05-11 17:29:49 +00:00
|
|
|
|
|
|
|
/* If write only, the seq_file is just a stub */
|
|
|
|
if (m)
|
|
|
|
kfree(m->private);
|
|
|
|
kfree(m);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-03-05 21:18:16 +00:00
|
|
|
static int tracing_buffers_open(struct inode *inode, struct file *filp);
|
|
|
|
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
|
|
|
|
size_t count, loff_t *ppos);
|
|
|
|
static int tracing_buffers_release(struct inode *inode, struct file *file);
|
|
|
|
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
|
|
|
|
struct pipe_inode_info *pipe, size_t len, unsigned int flags);
|
|
|
|
|
|
|
|
static int snapshot_raw_open(struct inode *inode, struct file *filp)
|
|
|
|
{
|
|
|
|
struct ftrace_buffer_info *info;
|
|
|
|
int ret;
|
|
|
|
|
2019-10-11 21:22:50 +00:00
|
|
|
/* The following checks for tracefs lockdown */
|
2013-03-05 21:18:16 +00:00
|
|
|
ret = tracing_buffers_open(inode, filp);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
info = filp->private_data;
|
|
|
|
|
|
|
|
if (info->iter.trace->use_max_tr) {
|
|
|
|
tracing_buffers_release(inode, filp);
|
|
|
|
return -EBUSY;
|
|
|
|
}
|
|
|
|
|
|
|
|
info->iter.snapshot = true;
|
2020-01-09 23:53:48 +00:00
|
|
|
info->iter.array_buffer = &info->iter.tr->max_buffer;
|
2013-03-05 21:18:16 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2012-12-26 02:53:00 +00:00
|
|
|
#endif /* CONFIG_TRACER_SNAPSHOT */
|
|
|
|
|
|
|
|
|
2014-07-18 11:17:27 +00:00
|
|
|
static const struct file_operations tracing_thresh_fops = {
|
|
|
|
.open = tracing_open_generic,
|
|
|
|
.read = tracing_thresh_read,
|
|
|
|
.write = tracing_thresh_write,
|
|
|
|
.llseek = generic_file_llseek,
|
|
|
|
};
|
|
|
|
|
2016-09-07 16:45:09 +00:00
|
|
|
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
|
2009-03-06 02:44:55 +00:00
|
|
|
static const struct file_operations tracing_max_lat_fops = {
|
2008-05-12 19:20:46 +00:00
|
|
|
.open = tracing_open_generic,
|
|
|
|
.read = tracing_max_lat_read,
|
|
|
|
.write = tracing_max_lat_write,
|
2010-07-07 21:40:11 +00:00
|
|
|
.llseek = generic_file_llseek,
|
2008-05-12 19:20:42 +00:00
|
|
|
};
|
2015-11-09 21:15:15 +00:00
|
|
|
#endif
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2009-03-06 02:44:55 +00:00
|
|
|
static const struct file_operations set_tracer_fops = {
|
2008-05-12 19:20:46 +00:00
|
|
|
.open = tracing_open_generic,
|
|
|
|
.read = tracing_set_trace_read,
|
|
|
|
.write = tracing_set_trace_write,
|
2010-07-07 21:40:11 +00:00
|
|
|
.llseek = generic_file_llseek,
|
2008-05-12 19:20:42 +00:00
|
|
|
};
|
|
|
|
|
2009-03-06 02:44:55 +00:00
|
|
|
static const struct file_operations tracing_pipe_fops = {
|
2008-05-12 19:20:46 +00:00
|
|
|
.open = tracing_open_pipe,
|
2008-05-12 19:20:49 +00:00
|
|
|
.poll = tracing_poll_pipe,
|
2008-05-12 19:20:46 +00:00
|
|
|
.read = tracing_read_pipe,
|
2009-02-09 06:15:56 +00:00
|
|
|
.splice_read = tracing_splice_read_pipe,
|
2008-05-12 19:20:46 +00:00
|
|
|
.release = tracing_release_pipe,
|
2010-07-07 21:40:11 +00:00
|
|
|
.llseek = no_llseek,
|
2008-05-12 19:20:46 +00:00
|
|
|
};
|
|
|
|
|
2009-03-06 02:44:55 +00:00
|
|
|
static const struct file_operations tracing_entries_fops = {
|
2013-07-23 15:26:06 +00:00
|
|
|
.open = tracing_open_generic_tr,
|
2008-05-12 19:20:59 +00:00
|
|
|
.read = tracing_entries_read,
|
|
|
|
.write = tracing_entries_write,
|
2010-07-07 21:40:11 +00:00
|
|
|
.llseek = generic_file_llseek,
|
2013-07-23 15:26:06 +00:00
|
|
|
.release = tracing_release_generic_tr,
|
2008-05-12 19:20:59 +00:00
|
|
|
};
|
|
|
|
|
2011-08-16 21:46:15 +00:00
|
|
|
static const struct file_operations tracing_total_entries_fops = {
|
2013-07-02 03:34:22 +00:00
|
|
|
.open = tracing_open_generic_tr,
|
2011-08-16 21:46:15 +00:00
|
|
|
.read = tracing_total_entries_read,
|
|
|
|
.llseek = generic_file_llseek,
|
2013-07-02 03:34:22 +00:00
|
|
|
.release = tracing_release_generic_tr,
|
2011-08-16 21:46:15 +00:00
|
|
|
};
|
|
|
|
|
2011-06-14 00:51:57 +00:00
|
|
|
static const struct file_operations tracing_free_buffer_fops = {
|
2013-07-02 03:34:22 +00:00
|
|
|
.open = tracing_open_generic_tr,
|
2011-06-14 00:51:57 +00:00
|
|
|
.write = tracing_free_buffer_write,
|
|
|
|
.release = tracing_free_buffer_release,
|
|
|
|
};
|
|
|
|
|
2009-03-06 02:44:55 +00:00
|
|
|
static const struct file_operations tracing_mark_fops = {
|
2013-07-02 03:34:22 +00:00
|
|
|
.open = tracing_open_generic_tr,
|
2008-09-16 19:06:42 +00:00
|
|
|
.write = tracing_mark_write,
|
2010-07-07 21:40:11 +00:00
|
|
|
.llseek = generic_file_llseek,
|
2013-07-02 03:34:22 +00:00
|
|
|
.release = tracing_release_generic_tr,
|
2008-09-16 19:06:42 +00:00
|
|
|
};
|
|
|
|
|
2016-07-06 19:25:08 +00:00
|
|
|
static const struct file_operations tracing_mark_raw_fops = {
|
|
|
|
.open = tracing_open_generic_tr,
|
|
|
|
.write = tracing_mark_raw_write,
|
|
|
|
.llseek = generic_file_llseek,
|
|
|
|
.release = tracing_release_generic_tr,
|
|
|
|
};
|
|
|
|
|
2009-08-25 08:12:56 +00:00
|
|
|
static const struct file_operations trace_clock_fops = {
|
2009-12-08 03:16:11 +00:00
|
|
|
.open = tracing_clock_open,
|
|
|
|
.read = seq_read,
|
|
|
|
.llseek = seq_lseek,
|
2013-07-02 03:34:22 +00:00
|
|
|
.release = tracing_single_release_tr,
|
2009-08-25 08:12:56 +00:00
|
|
|
.write = tracing_clock_write,
|
|
|
|
};
|
|
|
|
|
2018-01-16 02:51:41 +00:00
|
|
|
static const struct file_operations trace_time_stamp_mode_fops = {
|
|
|
|
.open = tracing_time_stamp_mode_open,
|
|
|
|
.read = seq_read,
|
|
|
|
.llseek = seq_lseek,
|
|
|
|
.release = tracing_single_release_tr,
|
|
|
|
};
|
|
|
|
|
2012-12-26 02:53:00 +00:00
|
|
|
#ifdef CONFIG_TRACER_SNAPSHOT
|
|
|
|
static const struct file_operations snapshot_fops = {
|
|
|
|
.open = tracing_snapshot_open,
|
|
|
|
.read = seq_read,
|
|
|
|
.write = tracing_snapshot_write,
|
2013-12-21 22:39:40 +00:00
|
|
|
.llseek = tracing_lseek,
|
2012-05-11 17:29:49 +00:00
|
|
|
.release = tracing_snapshot_release,
|
2012-12-26 02:53:00 +00:00
|
|
|
};
|
|
|
|
|
2013-03-05 21:18:16 +00:00
|
|
|
static const struct file_operations snapshot_raw_fops = {
|
|
|
|
.open = snapshot_raw_open,
|
|
|
|
.read = tracing_buffers_read,
|
|
|
|
.release = tracing_buffers_release,
|
|
|
|
.splice_read = tracing_buffers_splice_read,
|
|
|
|
.llseek = no_llseek,
|
2008-12-02 03:20:19 +00:00
|
|
|
};
|
|
|
|
|
2013-03-05 21:18:16 +00:00
|
|
|
#endif /* CONFIG_TRACER_SNAPSHOT */
|
|
|
|
|
2019-03-31 23:48:15 +00:00
|
|
|
#define TRACING_LOG_ERRS_MAX 8
|
|
|
|
#define TRACING_LOG_LOC_MAX 128
|
|
|
|
|
|
|
|
#define CMD_PREFIX " Command: "
|
|
|
|
|
|
|
|
struct err_info {
|
|
|
|
const char **errs; /* ptr to loc-specific array of err strings */
|
|
|
|
u8 type; /* index into errs -> specific err string */
|
|
|
|
u8 pos; /* MAX_FILTER_STR_VAL = 256 */
|
|
|
|
u64 ts;
|
|
|
|
};
|
|
|
|
|
|
|
|
struct tracing_log_err {
|
|
|
|
struct list_head list;
|
|
|
|
struct err_info info;
|
|
|
|
char loc[TRACING_LOG_LOC_MAX]; /* err location */
|
|
|
|
char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
|
|
|
|
};
|
|
|
|
|
|
|
|
static DEFINE_MUTEX(tracing_err_log_lock);
|
|
|
|
|
2019-06-14 15:32:10 +00:00
|
|
|
static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
|
2019-03-31 23:48:15 +00:00
|
|
|
{
|
|
|
|
struct tracing_log_err *err;
|
|
|
|
|
2019-04-02 02:52:21 +00:00
|
|
|
if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
|
2019-03-31 23:48:15 +00:00
|
|
|
err = kzalloc(sizeof(*err), GFP_KERNEL);
|
|
|
|
if (!err)
|
|
|
|
err = ERR_PTR(-ENOMEM);
|
2019-04-02 02:52:21 +00:00
|
|
|
tr->n_err_log_entries++;
|
2019-03-31 23:48:15 +00:00
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2019-04-02 02:52:21 +00:00
|
|
|
err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
|
2019-03-31 23:48:15 +00:00
|
|
|
list_del(&err->list);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* err_pos - find the position of a string within a command for error careting
|
|
|
|
* @cmd: The tracing command that caused the error
|
|
|
|
* @str: The string to position the caret at within @cmd
|
|
|
|
*
|
|
|
|
* Finds the position of the first occurrence of @str within @cmd. The
|
|
|
|
* return value can be passed to tracing_log_err() for caret placement
|
|
|
|
* within @cmd.
|
|
|
|
*
|
|
|
|
* Returns the index within @cmd of the first occurrence of @str or 0
|
|
|
|
* if @str was not found.
|
|
|
|
*/
|
|
|
|
unsigned int err_pos(char *cmd, const char *str)
|
|
|
|
{
|
|
|
|
char *found;
|
|
|
|
|
|
|
|
if (WARN_ON(!strlen(cmd)))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
found = strstr(cmd, str);
|
|
|
|
if (found)
|
|
|
|
return found - cmd;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* tracing_log_err - write an error to the tracing error log
|
2019-04-02 02:52:21 +00:00
|
|
|
* @tr: The associated trace array for the error (NULL for top level array)
|
2019-03-31 23:48:15 +00:00
|
|
|
* @loc: A string describing where the error occurred
|
|
|
|
* @cmd: The tracing command that caused the error
|
|
|
|
* @errs: The array of loc-specific static error strings
|
|
|
|
* @type: The index into errs[], which produces the specific static err string
|
|
|
|
* @pos: The position the caret should be placed in the cmd
|
|
|
|
*
|
|
|
|
* Writes an error into tracing/error_log of the form:
|
|
|
|
*
|
|
|
|
* <loc>: error: <text>
|
|
|
|
* Command: <cmd>
|
|
|
|
* ^
|
|
|
|
*
|
|
|
|
* tracing/error_log is a small log file containing the last
|
|
|
|
* TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
|
|
|
|
* unless there has been a tracing error, and the error log can be
|
|
|
|
* cleared and have its memory freed by writing the empty string in
|
|
|
|
* truncation mode to it i.e. echo > tracing/error_log.
|
|
|
|
*
|
|
|
|
* NOTE: the @errs array along with the @type param are used to
|
|
|
|
* produce a static error string - this string is not copied and saved
|
|
|
|
* when the error is logged - only a pointer to it is saved. See
|
|
|
|
* existing callers for examples of how static strings are typically
|
|
|
|
* defined for use with tracing_log_err().
|
|
|
|
*/
|
2019-04-02 02:52:21 +00:00
|
|
|
void tracing_log_err(struct trace_array *tr,
|
|
|
|
const char *loc, const char *cmd,
|
2019-03-31 23:48:15 +00:00
|
|
|
const char **errs, u8 type, u8 pos)
|
|
|
|
{
|
|
|
|
struct tracing_log_err *err;
|
|
|
|
|
2019-04-02 02:52:21 +00:00
|
|
|
if (!tr)
|
|
|
|
tr = &global_trace;
|
|
|
|
|
2019-03-31 23:48:15 +00:00
|
|
|
mutex_lock(&tracing_err_log_lock);
|
2019-04-02 02:52:21 +00:00
|
|
|
err = get_tracing_log_err(tr);
|
2019-03-31 23:48:15 +00:00
|
|
|
if (PTR_ERR(err) == -ENOMEM) {
|
|
|
|
mutex_unlock(&tracing_err_log_lock);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
|
|
|
|
snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
|
|
|
|
|
|
|
|
err->info.errs = errs;
|
|
|
|
err->info.type = type;
|
|
|
|
err->info.pos = pos;
|
|
|
|
err->info.ts = local_clock();
|
|
|
|
|
2019-04-02 02:52:21 +00:00
|
|
|
list_add_tail(&err->list, &tr->err_log);
|
2019-03-31 23:48:15 +00:00
|
|
|
mutex_unlock(&tracing_err_log_lock);
|
|
|
|
}
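A hedged kernel-side sketch of how a caller is expected to combine tracing_log_err() with err_pos(), following the kernel-doc above; the error table, enum and wrapper (foo_errs, FOO_ERR_*, foo_report_error) are invented for illustration and are not part of this file.

/* Illustrative only: the error strings must be static, since
 * tracing_log_err() saves a pointer to them rather than a copy. */
static const char *foo_errs[] = {
        "field not found",              /* FOO_ERR_NO_FIELD */
        "duplicate modifier",           /* FOO_ERR_DUP_MOD  */
};

enum { FOO_ERR_NO_FIELD, FOO_ERR_DUP_MOD };

static void foo_report_error(struct trace_array *tr, char *cmd,
                             const char *bad_tok)
{
        /* The caret lands under the first occurrence of bad_tok in cmd. */
        tracing_log_err(tr, "foo: parse_cmd", cmd, foo_errs,
                        FOO_ERR_NO_FIELD, err_pos(cmd, bad_tok));
}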
|
|
|
|
|
2019-04-02 02:52:21 +00:00
|
|
|
static void clear_tracing_err_log(struct trace_array *tr)
|
2019-03-31 23:48:15 +00:00
|
|
|
{
|
|
|
|
struct tracing_log_err *err, *next;
|
|
|
|
|
|
|
|
mutex_lock(&tracing_err_log_lock);
|
2019-04-02 02:52:21 +00:00
|
|
|
list_for_each_entry_safe(err, next, &tr->err_log, list) {
|
2019-03-31 23:48:15 +00:00
|
|
|
list_del(&err->list);
|
|
|
|
kfree(err);
|
|
|
|
}
|
|
|
|
|
2019-04-02 02:52:21 +00:00
|
|
|
tr->n_err_log_entries = 0;
|
2019-03-31 23:48:15 +00:00
|
|
|
mutex_unlock(&tracing_err_log_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
|
|
|
|
{
|
2019-04-02 02:52:21 +00:00
|
|
|
struct trace_array *tr = m->private;
|
|
|
|
|
2019-03-31 23:48:15 +00:00
|
|
|
mutex_lock(&tracing_err_log_lock);
|
|
|
|
|
2019-04-02 02:52:21 +00:00
|
|
|
return seq_list_start(&tr->err_log, *pos);
|
2019-03-31 23:48:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
|
|
|
|
{
|
2019-04-02 02:52:21 +00:00
|
|
|
struct trace_array *tr = m->private;
|
|
|
|
|
|
|
|
return seq_list_next(v, &tr->err_log, pos);
|
2019-03-31 23:48:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
|
|
|
|
{
|
|
|
|
mutex_unlock(&tracing_err_log_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
|
|
|
|
{
|
|
|
|
u8 i;
|
|
|
|
|
|
|
|
for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
|
|
|
|
seq_putc(m, ' ');
|
|
|
|
for (i = 0; i < pos; i++)
|
|
|
|
seq_putc(m, ' ');
|
|
|
|
seq_puts(m, "^\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
static int tracing_err_log_seq_show(struct seq_file *m, void *v)
|
|
|
|
{
|
|
|
|
struct tracing_log_err *err = v;
|
|
|
|
|
|
|
|
if (err) {
|
|
|
|
const char *err_text = err->info.errs[err->info.type];
|
|
|
|
u64 sec = err->info.ts;
|
|
|
|
u32 nsec;
|
|
|
|
|
|
|
|
nsec = do_div(sec, NSEC_PER_SEC);
|
|
|
|
seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
|
|
|
|
err->loc, err_text);
|
|
|
|
seq_printf(m, "%s", err->cmd);
|
|
|
|
tracing_err_log_show_pos(m, err->info.pos);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
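Putting the pieces together, a hypothetical error_log entry rendered by these seq operations would look roughly like this (loc, command and timestamp are invented; the layout follows the format strings above):

/*
 * [  123.456789] foo: parse_cmd: error: field not found
 *  Command: foo:keys=bogus_field
 *                    ^
 */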
|
|
|
|
|
|
|
|
static const struct seq_operations tracing_err_log_seq_ops = {
|
|
|
|
.start = tracing_err_log_seq_start,
|
|
|
|
.next = tracing_err_log_seq_next,
|
|
|
|
.stop = tracing_err_log_seq_stop,
|
|
|
|
.show = tracing_err_log_seq_show
|
|
|
|
};
|
|
|
|
|
|
|
|
static int tracing_err_log_open(struct inode *inode, struct file *file)
|
|
|
|
{
|
2019-04-02 02:52:21 +00:00
|
|
|
struct trace_array *tr = inode->i_private;
|
2019-03-31 23:48:15 +00:00
|
|
|
int ret = 0;
|
|
|
|
|
tracing: Add tracing_check_open_get_tr() (changelog above)
2019-10-11 21:39:57 +00:00
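A minimal sketch, based only on the description above, of what the combined
helper could look like (the real upstream body differs in detail, e.g. it also
feeds into the later lockdown work):

int tracing_check_open_get_tr(struct trace_array *tr)
{
	/* Sketch only: the one-way "tracing is broken" switch. */
	if (tracing_disabled)
		return -ENODEV;

	/* Hold a reference so the instance can't be freed while it is open. */
	if (tr && trace_array_get(tr) < 0)
		return -ENODEV;

	return 0;
}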
|
|
|
ret = tracing_check_open_get_tr(tr);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2019-04-02 02:52:21 +00:00
|
|
|
|
2019-03-31 23:48:15 +00:00
|
|
|
/* If this file was opened for write, then erase contents */
|
|
|
|
if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
|
2019-04-02 02:52:21 +00:00
|
|
|
clear_tracing_err_log(tr);
|
2019-03-31 23:48:15 +00:00
|
|
|
|
2019-04-02 02:52:21 +00:00
|
|
|
if (file->f_mode & FMODE_READ) {
|
2019-03-31 23:48:15 +00:00
|
|
|
ret = seq_open(file, &tracing_err_log_seq_ops);
|
2019-04-02 02:52:21 +00:00
|
|
|
if (!ret) {
|
|
|
|
struct seq_file *m = file->private_data;
|
|
|
|
m->private = tr;
|
|
|
|
} else {
|
|
|
|
trace_array_put(tr);
|
|
|
|
}
|
|
|
|
}
|
2019-03-31 23:48:15 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t tracing_err_log_write(struct file *file,
|
|
|
|
const char __user *buffer,
|
|
|
|
size_t count, loff_t *ppos)
|
|
|
|
{
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
2019-06-28 10:56:40 +00:00
|
|
|
static int tracing_err_log_release(struct inode *inode, struct file *file)
|
|
|
|
{
|
|
|
|
struct trace_array *tr = inode->i_private;
|
|
|
|
|
|
|
|
trace_array_put(tr);
|
|
|
|
|
|
|
|
if (file->f_mode & FMODE_READ)
|
|
|
|
seq_release(inode, file);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-03-31 23:48:15 +00:00
|
|
|
static const struct file_operations tracing_err_log_fops = {
|
|
|
|
.open = tracing_err_log_open,
|
|
|
|
.write = tracing_err_log_write,
|
|
|
|
.read = seq_read,
|
|
|
|
.llseek = seq_lseek,
|
2019-06-28 10:56:40 +00:00
|
|
|
.release = tracing_err_log_release,
|
2019-03-31 23:48:15 +00:00
|
|
|
};
|
|
|
|
|
2008-12-02 03:20:19 +00:00
|
|
|
static int tracing_buffers_open(struct inode *inode, struct file *filp)
|
|
|
|
{
|
2013-07-23 15:26:00 +00:00
|
|
|
struct trace_array *tr = inode->i_private;
|
2008-12-02 03:20:19 +00:00
|
|
|
struct ftrace_buffer_info *info;
|
2013-07-02 03:34:22 +00:00
|
|
|
int ret;
|
2008-12-02 03:20:19 +00:00
|
|
|
|
2019-10-11 21:39:57 +00:00
|
|
|
ret = tracing_check_open_get_tr(tr);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2013-07-02 03:34:22 +00:00
|
|
|
|
2020-07-31 00:27:45 +00:00
|
|
|
info = kvzalloc(sizeof(*info), GFP_KERNEL);
|
2013-07-02 03:34:22 +00:00
|
|
|
if (!info) {
|
|
|
|
trace_array_put(tr);
|
2008-12-02 03:20:19 +00:00
|
|
|
return -ENOMEM;
|
2013-07-02 03:34:22 +00:00
|
|
|
}
|
2008-12-02 03:20:19 +00:00
|
|
|
|
2013-03-06 20:27:24 +00:00
|
|
|
mutex_lock(&trace_types_lock);
|
|
|
|
|
2013-02-28 14:17:16 +00:00
|
|
|
info->iter.tr = tr;
|
2013-07-23 15:26:00 +00:00
|
|
|
info->iter.cpu_file = tracing_get_cpu(inode);
|
2013-02-28 18:44:11 +00:00
|
|
|
info->iter.trace = tr->current_trace;
|
2020-01-09 23:53:48 +00:00
|
|
|
info->iter.array_buffer = &tr->array_buffer;
|
2013-02-28 14:17:16 +00:00
|
|
|
info->spare = NULL;
|
2008-12-02 03:20:19 +00:00
|
|
|
/* Force reading ring buffer for first read */
|
2013-02-28 14:17:16 +00:00
|
|
|
info->read = (unsigned int)-1;
|
2008-12-02 03:20:19 +00:00
|
|
|
|
|
|
|
filp->private_data = info;
|
|
|
|
|
2020-06-30 03:45:56 +00:00
|
|
|
tr->trace_ref++;
|
2014-12-16 01:13:31 +00:00
|
|
|
|
2013-03-06 20:27:24 +00:00
|
|
|
mutex_unlock(&trace_types_lock);
|
|
|
|
|
2013-07-02 03:34:22 +00:00
|
|
|
ret = nonseekable_open(inode, filp);
|
|
|
|
if (ret < 0)
|
|
|
|
trace_array_put(tr);
|
|
|
|
|
|
|
|
return ret;
|
2008-12-02 03:20:19 +00:00
|
|
|
}
|
|
|
|
|
2017-07-03 04:42:43 +00:00
|
|
|
static __poll_t
|
2013-02-28 14:17:16 +00:00
|
|
|
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
|
|
|
|
{
|
|
|
|
struct ftrace_buffer_info *info = filp->private_data;
|
|
|
|
struct trace_iterator *iter = &info->iter;
|
|
|
|
|
|
|
|
return trace_poll(iter, filp, poll_table);
|
|
|
|
}
|
|
|
|
|
2008-12-02 03:20:19 +00:00
|
|
|
static ssize_t
|
|
|
|
tracing_buffers_read(struct file *filp, char __user *ubuf,
|
|
|
|
size_t count, loff_t *ppos)
|
|
|
|
{
|
|
|
|
struct ftrace_buffer_info *info = filp->private_data;
|
2013-02-28 14:17:16 +00:00
|
|
|
struct trace_iterator *iter = &info->iter;
|
2017-08-02 18:20:54 +00:00
|
|
|
ssize_t ret = 0;
|
2013-03-05 21:18:16 +00:00
|
|
|
ssize_t size;
|
2008-12-02 03:20:19 +00:00
|
|
|
|
2009-03-05 00:10:05 +00:00
|
|
|
if (!count)
|
|
|
|
return 0;
|
|
|
|
|
2013-03-05 21:18:16 +00:00
|
|
|
#ifdef CONFIG_TRACER_MAX_TRACE
|
2014-12-16 03:31:07 +00:00
|
|
|
if (iter->snapshot && iter->tr->current_trace->use_max_tr)
|
|
|
|
return -EBUSY;
|
2013-03-05 21:18:16 +00:00
|
|
|
#endif
|
|
|
|
|
2017-05-01 13:35:09 +00:00
|
|
|
if (!info->spare) {
|
2020-01-09 23:53:48 +00:00
|
|
|
info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
|
tracing: Consolidate max_tr into main trace_array structure
Currently, the way the latency tracers and snapshot feature works
is to have a separate trace_array called "max_tr" that holds the
snapshot buffer. For latency tracers, this snapshot buffer is used
to swap the running buffer with this buffer to save the current max
latency.
The only items needed for the max_tr are really just a copy of the buffer
itself, the per_cpu data pointers, the time_start timestamp that states
when the max latency was triggered, and the cpu that the max latency
was triggered on. All other fields in trace_array are unused by the
max_tr, making the max_tr mostly bloat.
This change removes the max_tr completely and adds a new structure
called trace_buffer, which holds the buffer pointer, the per_cpu data
pointers, the time_start timestamp, and the cpu where the latency occurred.
The trace_array now has two trace_buffers, one for the normal trace and
one for the max trace or snapshot. By doing this, not only do we remove
the bloat from the max_tr, but instances can now use their own snapshot
feature and latency tracers, instead of only the top level global_trace
having them.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2013-03-05 14:24:35 +00:00
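As a sketch of the layout that commit message describes (the struct and field
names below are assumptions for illustration; in the code that follows, the
structure has since been renamed array_buffer):

/* Per-buffer state pulled out of trace_array, as described above. */
struct trace_buffer_sketch {
	struct trace_array		*tr;		/* owning trace array */
	struct ring_buffer		*buffer;	/* the ring buffer itself */
	struct trace_array_cpu __percpu	*data;		/* per-cpu trace data */
	u64				time_start;	/* when the max latency hit */
	int				cpu;		/* cpu that hit the max */
};

/* The trace_array then carries two of them instead of a whole max_tr. */
struct trace_array_sketch {
	struct trace_buffer_sketch	array_buffer;	/* normal trace */
	struct trace_buffer_sketch	max_buffer;	/* max trace / snapshot */
	/* ... */
};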
|
|
|
iter->cpu_file);
|
2017-08-02 18:20:54 +00:00
|
|
|
if (IS_ERR(info->spare)) {
|
|
|
|
ret = PTR_ERR(info->spare);
|
|
|
|
info->spare = NULL;
|
|
|
|
} else {
|
|
|
|
info->spare_cpu = iter->cpu_file;
|
|
|
|
}
|
2017-05-01 13:35:09 +00:00
|
|
|
}
|
2009-04-02 07:16:59 +00:00
|
|
|
if (!info->spare)
|
2017-08-02 18:20:54 +00:00
|
|
|
return ret;
|
2009-04-02 07:16:59 +00:00
|
|
|
|
2008-12-02 03:20:19 +00:00
|
|
|
/* Do we have previous read data to read? */
|
|
|
|
if (info->read < PAGE_SIZE)
|
|
|
|
goto read;
|
|
|
|
|
2013-02-28 18:44:11 +00:00
|
|
|
again:
|
2013-02-28 14:17:16 +00:00
|
|
|
trace_access_lock(iter->cpu_file);
|
2020-01-09 23:53:48 +00:00
|
|
|
ret = ring_buffer_read_page(iter->array_buffer->buffer,
|
2008-12-02 03:20:19 +00:00
|
|
|
&info->spare,
|
|
|
|
count,
|
2013-02-28 14:17:16 +00:00
|
|
|
iter->cpu_file, 0);
|
|
|
|
trace_access_unlock(iter->cpu_file);
|
2008-12-02 03:20:19 +00:00
|
|
|
|
2013-02-28 18:44:11 +00:00
|
|
|
if (ret < 0) {
|
|
|
|
if (trace_empty(iter)) {
|
2014-12-16 03:31:07 +00:00
|
|
|
if ((filp->f_flags & O_NONBLOCK))
|
|
|
|
return -EAGAIN;
|
|
|
|
|
2018-11-30 01:32:26 +00:00
|
|
|
ret = wait_on_pipe(iter, 0);
|
2014-12-16 03:31:07 +00:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2013-02-28 18:44:11 +00:00
|
|
|
goto again;
|
|
|
|
}
|
2014-12-16 03:31:07 +00:00
|
|
|
return 0;
|
2013-02-28 18:44:11 +00:00
|
|
|
}
|
2011-10-14 14:44:25 +00:00
|
|
|
|
|
|
|
info->read = 0;
|
2013-02-28 18:44:11 +00:00
|
|
|
read:
|
2008-12-02 03:20:19 +00:00
|
|
|
size = PAGE_SIZE - info->read;
|
|
|
|
if (size > count)
|
|
|
|
size = count;
|
|
|
|
|
|
|
|
ret = copy_to_user(ubuf, info->spare + info->read, size);
|
2014-12-16 03:31:07 +00:00
|
|
|
if (ret == size)
|
|
|
|
return -EFAULT;
|
|
|
|
|
2009-03-05 00:10:05 +00:00
|
|
|
size -= ret;
|
|
|
|
|
2008-12-02 03:20:19 +00:00
|
|
|
*ppos += size;
|
|
|
|
info->read += size;
|
|
|
|
|
|
|
|
return size;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int tracing_buffers_release(struct inode *inode, struct file *file)
|
|
|
|
{
|
|
|
|
struct ftrace_buffer_info *info = file->private_data;
|
2013-02-28 14:17:16 +00:00
|
|
|
struct trace_iterator *iter = &info->iter;
|
2008-12-02 03:20:19 +00:00
|
|
|
|
2013-03-06 20:27:24 +00:00
|
|
|
mutex_lock(&trace_types_lock);
|
|
|
|
|
2020-06-30 03:45:56 +00:00
|
|
|
iter->tr->trace_ref--;
|
2014-12-16 01:13:31 +00:00
|
|
|
|
2013-07-02 02:50:29 +00:00
|
|
|
__trace_array_put(iter->tr);
|
2008-12-02 03:20:19 +00:00
|
|
|
|
2009-04-02 07:16:59 +00:00
|
|
|
if (info->spare)
|
2020-01-09 23:53:48 +00:00
|
|
|
ring_buffer_free_read_page(iter->array_buffer->buffer,
|
2017-05-01 13:35:09 +00:00
|
|
|
info->spare_cpu, info->spare);
|
2020-07-31 00:27:45 +00:00
|
|
|
kvfree(info);
|
2008-12-02 03:20:19 +00:00
|
|
|
|
2013-03-06 20:27:24 +00:00
|
|
|
mutex_unlock(&trace_types_lock);
|
|
|
|
|
2008-12-02 03:20:19 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct buffer_ref {
|
2019-12-13 18:58:57 +00:00
|
|
|
struct trace_buffer *buffer;
|
2008-12-02 03:20:19 +00:00
|
|
|
void *page;
|
2017-05-01 13:35:09 +00:00
|
|
|
int cpu;
|
2019-04-04 21:59:25 +00:00
|
|
|
refcount_t refcount;
|
2008-12-02 03:20:19 +00:00
|
|
|
};
|
|
|
|
|
2019-04-04 21:59:25 +00:00
|
|
|
static void buffer_ref_release(struct buffer_ref *ref)
|
|
|
|
{
|
|
|
|
if (!refcount_dec_and_test(&ref->refcount))
|
|
|
|
return;
|
|
|
|
ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
|
|
|
|
kfree(ref);
|
|
|
|
}
|
|
|
|
|
2008-12-02 03:20:19 +00:00
|
|
|
static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
|
|
|
|
struct pipe_buffer *buf)
|
|
|
|
{
|
|
|
|
struct buffer_ref *ref = (struct buffer_ref *)buf->private;
|
|
|
|
|
2019-04-04 21:59:25 +00:00
|
|
|
buffer_ref_release(ref);
|
2008-12-02 03:20:19 +00:00
|
|
|
buf->private = 0;
|
|
|
|
}
|
|
|
|
|
2019-04-05 21:02:10 +00:00
|
|
|
static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
|
2008-12-02 03:20:19 +00:00
|
|
|
struct pipe_buffer *buf)
|
|
|
|
{
|
|
|
|
struct buffer_ref *ref = (struct buffer_ref *)buf->private;
|
|
|
|
|
2019-04-26 18:09:55 +00:00
|
|
|
if (refcount_read(&ref->refcount) > INT_MAX/2)
|
2019-04-05 21:02:10 +00:00
|
|
|
return false;
|
|
|
|
|
2019-04-04 21:59:25 +00:00
|
|
|
refcount_inc(&ref->refcount);
|
2019-04-05 21:02:10 +00:00
|
|
|
return true;
|
2008-12-02 03:20:19 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Pipe buffer operations for a buffer. */
|
2009-12-16 00:46:48 +00:00
|
|
|
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
|
2008-12-02 03:20:19 +00:00
|
|
|
.release = buffer_pipe_buf_release,
|
|
|
|
.get = buffer_pipe_buf_get,
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Callback from splice_to_pipe(), if we need to release some pages
|
|
|
|
* at the end of the spd in case we error'ed out in filling the pipe.
|
|
|
|
*/
|
|
|
|
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
|
|
|
|
{
|
|
|
|
struct buffer_ref *ref =
|
|
|
|
(struct buffer_ref *)spd->partial[i].private;
|
|
|
|
|
2019-04-04 21:59:25 +00:00
|
|
|
buffer_ref_release(ref);
|
2008-12-02 03:20:19 +00:00
|
|
|
spd->partial[i].private = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
|
|
|
|
struct pipe_inode_info *pipe, size_t len,
|
|
|
|
unsigned int flags)
|
|
|
|
{
|
|
|
|
struct ftrace_buffer_info *info = file->private_data;
|
2013-02-28 14:17:16 +00:00
|
|
|
struct trace_iterator *iter = &info->iter;
|
2010-05-20 08:43:18 +00:00
|
|
|
struct partial_page partial_def[PIPE_DEF_BUFFERS];
|
|
|
|
struct page *pages_def[PIPE_DEF_BUFFERS];
|
2008-12-02 03:20:19 +00:00
|
|
|
struct splice_pipe_desc spd = {
|
2010-05-20 08:43:18 +00:00
|
|
|
.pages = pages_def,
|
|
|
|
.partial = partial_def,
|
2012-06-12 13:24:40 +00:00
|
|
|
.nr_pages_max = PIPE_DEF_BUFFERS,
|
2008-12-02 03:20:19 +00:00
|
|
|
.ops = &buffer_pipe_buf_ops,
|
|
|
|
.spd_release = buffer_spd_release,
|
|
|
|
};
|
|
|
|
struct buffer_ref *ref;
|
2017-12-23 01:38:57 +00:00
|
|
|
int entries, i;
|
2014-11-06 21:26:07 +00:00
|
|
|
ssize_t ret = 0;
|
2008-12-02 03:20:19 +00:00
|
|
|
|
2013-03-05 21:18:16 +00:00
|
|
|
#ifdef CONFIG_TRACER_MAX_TRACE
|
2014-12-16 03:31:07 +00:00
|
|
|
if (iter->snapshot && iter->tr->current_trace->use_max_tr)
|
|
|
|
return -EBUSY;
|
2013-03-05 21:18:16 +00:00
|
|
|
#endif
|
|
|
|
|
2014-12-16 03:31:07 +00:00
|
|
|
if (*ppos & (PAGE_SIZE - 1))
|
|
|
|
return -EINVAL;
|
tracing: fix splice return too large
I got these from strace:
splice(0x3, 0, 0x5, 0, 0x1000, 0x1) = 12288
splice(0x3, 0, 0x5, 0, 0x1000, 0x1) = 12288
splice(0x3, 0, 0x5, 0, 0x1000, 0x1) = 12288
splice(0x3, 0, 0x5, 0, 0x1000, 0x1) = 16384
splice(0x3, 0, 0x5, 0, 0x1000, 0x1) = 8192
splice(0x3, 0, 0x5, 0, 0x1000, 0x1) = 8192
splice(0x3, 0, 0x5, 0, 0x1000, 0x1) = 8192
I wanted to splice_read 4096 bytes, but it returned 8192 or larger.
This is because the return value of tracing_buffers_splice_read()
does not include the "zero out any left over data" bytes.
But tracing_buffers_read() does include these bytes, so make the two
consistent.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <srostedt@redhat.com>
LKML-Reference: <49D46674.9030804@cn.fujitsu.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-04-02 07:17:08 +00:00
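A rough user-space sketch of the call pattern in that strace output (the
tracefs path and the error handling are assumptions; the point is only that a
single-page request should not be reported as two or more pages):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	ssize_t n;
	int trace_fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
			    O_RDONLY);

	if (trace_fd < 0 || pipe(fds) < 0)
		return 1;

	/* Ask for one page (0x1000) with SPLICE_F_MOVE, as in the strace. */
	n = splice(trace_fd, NULL, fds[1], NULL, 4096, SPLICE_F_MOVE);
	printf("splice returned %zd\n", n);
	return 0;
}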
|
|
|
|
|
|
|
if (len & (PAGE_SIZE - 1)) {
|
2014-12-16 03:31:07 +00:00
|
|
|
if (len < PAGE_SIZE)
|
|
|
|
return -EINVAL;
|
2009-04-02 07:17:08 +00:00
|
|
|
len &= PAGE_MASK;
|
|
|
|
}
|
|
|
|
|
2016-09-17 22:31:46 +00:00
|
|
|
if (splice_grow_spd(pipe, &spd))
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2013-02-28 14:17:16 +00:00
|
|
|
again:
|
|
|
|
trace_access_lock(iter->cpu_file);
|
2020-01-09 23:53:48 +00:00
|
|
|
entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
|
2009-04-29 04:23:13 +00:00
|
|
|
|
2014-04-11 16:01:03 +00:00
|
|
|
for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
|
2008-12-02 03:20:19 +00:00
|
|
|
struct page *page;
|
|
|
|
int r;
|
|
|
|
|
|
|
|
ref = kzalloc(sizeof(*ref), GFP_KERNEL);
|
2014-11-06 21:26:07 +00:00
|
|
|
if (!ref) {
|
|
|
|
ret = -ENOMEM;
|
2008-12-02 03:20:19 +00:00
|
|
|
break;
|
2014-11-06 21:26:07 +00:00
|
|
|
}
|
2008-12-02 03:20:19 +00:00
|
|
|
|
2019-04-04 21:59:25 +00:00
|
|
|
refcount_set(&ref->refcount, 1);
|
2020-01-09 23:53:48 +00:00
|
|
|
ref->buffer = iter->array_buffer->buffer;
|
2013-02-28 14:17:16 +00:00
|
|
|
ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
|
2017-08-02 18:20:54 +00:00
|
|
|
if (IS_ERR(ref->page)) {
|
|
|
|
ret = PTR_ERR(ref->page);
|
|
|
|
ref->page = NULL;
|
2008-12-02 03:20:19 +00:00
|
|
|
kfree(ref);
|
|
|
|
break;
|
|
|
|
}
|
2017-05-01 13:35:09 +00:00
|
|
|
ref->cpu = iter->cpu_file;
|
2008-12-02 03:20:19 +00:00
|
|
|
|
|
|
|
r = ring_buffer_read_page(ref->buffer, &ref->page,
|
2013-02-28 14:17:16 +00:00
|
|
|
len, iter->cpu_file, 1);
|
2008-12-02 03:20:19 +00:00
|
|
|
if (r < 0) {
|
2017-05-01 13:35:09 +00:00
|
|
|
ring_buffer_free_read_page(ref->buffer, ref->cpu,
|
|
|
|
ref->page);
|
2008-12-02 03:20:19 +00:00
|
|
|
kfree(ref);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
page = virt_to_page(ref->page);
|
|
|
|
|
|
|
|
spd.pages[i] = page;
|
|
|
|
spd.partial[i].len = PAGE_SIZE;
|
|
|
|
spd.partial[i].offset = 0;
|
|
|
|
spd.partial[i].private = (unsigned long)ref;
|
|
|
|
spd.nr_pages++;
|
2009-04-02 07:17:08 +00:00
|
|
|
*ppos += PAGE_SIZE;
|
2009-04-29 04:23:13 +00:00
|
|
|
|
2020-01-09 23:53:48 +00:00
|
|
|
entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
|
2008-12-02 03:20:19 +00:00
|
|
|
}
|
|
|
|
|
2013-02-28 14:17:16 +00:00
|
|
|
trace_access_unlock(iter->cpu_file);
|
2008-12-02 03:20:19 +00:00
|
|
|
spd.nr_pages = i;
|
|
|
|
|
|
|
|
/* did we read anything? */
|
|
|
|
if (!spd.nr_pages) {
|
2014-11-06 21:26:07 +00:00
|
|
|
if (ret)
|
2016-09-17 22:31:46 +00:00
|
|
|
goto out;
|
2014-12-16 03:31:07 +00:00
|
|
|
|
2016-09-17 22:31:46 +00:00
|
|
|
ret = -EAGAIN;
|
2014-12-16 03:31:07 +00:00
|
|
|
if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
|
2016-09-17 22:31:46 +00:00
|
|
|
goto out;
|
2014-11-06 21:26:07 +00:00
|
|
|
|
2018-11-30 02:38:42 +00:00
|
|
|
ret = wait_on_pipe(iter, iter->tr->buffer_percent);
|
2014-06-10 13:46:00 +00:00
|
|
|
if (ret)
|
2016-09-17 22:31:46 +00:00
|
|
|
goto out;
|
2014-11-10 18:46:34 +00:00
|
|
|
|
2013-02-28 14:17:16 +00:00
|
|
|
goto again;
|
2008-12-02 03:20:19 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ret = splice_to_pipe(pipe, &spd);
|
2016-09-17 22:31:46 +00:00
|
|
|
out:
|
2012-06-12 13:24:40 +00:00
|
|
|
splice_shrink_spd(&spd);
|
2013-03-05 21:18:16 +00:00
|
|
|
|
2008-12-02 03:20:19 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct file_operations tracing_buffers_fops = {
|
|
|
|
.open = tracing_buffers_open,
|
|
|
|
.read = tracing_buffers_read,
|
2013-02-28 14:17:16 +00:00
|
|
|
.poll = tracing_buffers_poll,
|
2008-12-02 03:20:19 +00:00
|
|
|
.release = tracing_buffers_release,
|
|
|
|
.splice_read = tracing_buffers_splice_read,
|
|
|
|
.llseek = no_llseek,
|
|
|
|
};
|
|
|
|
|
2009-04-29 22:03:45 +00:00
|
|
|
static ssize_t
|
|
|
|
tracing_stats_read(struct file *filp, char __user *ubuf,
|
|
|
|
size_t count, loff_t *ppos)
|
|
|
|
{
|
2013-07-23 15:26:03 +00:00
|
|
|
struct inode *inode = file_inode(filp);
|
|
|
|
struct trace_array *tr = inode->i_private;
|
2020-01-09 23:53:48 +00:00
|
|
|
struct array_buffer *trace_buf = &tr->array_buffer;
|
2013-07-23 15:26:03 +00:00
|
|
|
int cpu = tracing_get_cpu(inode);
|
2009-04-29 22:03:45 +00:00
|
|
|
struct trace_seq *s;
|
|
|
|
unsigned long cnt;
|
2011-08-16 21:46:16 +00:00
|
|
|
unsigned long long t;
|
|
|
|
unsigned long usec_rem;
|
2009-04-29 22:03:45 +00:00
|
|
|
|
2009-06-15 02:57:28 +00:00
|
|
|
s = kmalloc(sizeof(*s), GFP_KERNEL);
|
2009-04-29 22:03:45 +00:00
|
|
|
if (!s)
|
2009-11-11 21:26:35 +00:00
|
|
|
return -ENOMEM;
|
2009-04-29 22:03:45 +00:00
|
|
|
|
|
|
|
trace_seq_init(s);
|
|
|
|
|
2013-03-05 14:24:35 +00:00
|
|
|
cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
|
2009-04-29 22:03:45 +00:00
|
|
|
trace_seq_printf(s, "entries: %ld\n", cnt);
|
|
|
|
|
2013-03-05 14:24:35 +00:00
|
|
|
cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
|
2009-04-29 22:03:45 +00:00
|
|
|
trace_seq_printf(s, "overrun: %ld\n", cnt);
|
|
|
|
|
2013-03-05 14:24:35 +00:00
|
|
|
cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
|
2009-04-29 22:03:45 +00:00
|
|
|
trace_seq_printf(s, "commit overrun: %ld\n", cnt);
|
|
|
|
|
2013-03-05 14:24:35 +00:00
|
|
|
cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
|
2011-08-16 21:46:16 +00:00
|
|
|
trace_seq_printf(s, "bytes: %ld\n", cnt);
|
|
|
|
|
2013-04-23 01:32:39 +00:00
|
|
|
if (trace_clocks[tr->clock_id].in_ns) {
|
2012-11-13 20:18:23 +00:00
|
|
|
/* local or global for trace_clock */
|
2013-03-05 14:24:35 +00:00
|
|
|
t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
|
2012-11-13 20:18:23 +00:00
|
|
|
usec_rem = do_div(t, USEC_PER_SEC);
|
|
|
|
trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
|
|
|
|
t, usec_rem);
|
|
|
|
|
2013-03-05 14:24:35 +00:00
|
|
|
t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
|
2012-11-13 20:18:23 +00:00
|
|
|
usec_rem = do_div(t, USEC_PER_SEC);
|
|
|
|
trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
|
|
|
|
} else {
|
|
|
|
/* counter or tsc mode for trace_clock */
|
|
|
|
trace_seq_printf(s, "oldest event ts: %llu\n",
|
2013-03-05 14:24:35 +00:00
|
|
|
ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
|
2011-08-16 21:46:16 +00:00
|
|
|
|
2012-11-13 20:18:23 +00:00
|
|
|
trace_seq_printf(s, "now ts: %llu\n",
|
2013-03-05 14:24:35 +00:00
|
|
|
ring_buffer_time_stamp(trace_buf->buffer, cpu));
|
2012-11-13 20:18:23 +00:00
|
|
|
}
|
2011-08-16 21:46:16 +00:00
|
|
|
|
2013-03-05 14:24:35 +00:00
|
|
|
cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
|
2011-07-15 21:23:58 +00:00
|
|
|
trace_seq_printf(s, "dropped events: %ld\n", cnt);
|
|
|
|
|
2013-03-05 14:24:35 +00:00
|
|
|
cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
|
2013-01-29 22:45:49 +00:00
|
|
|
trace_seq_printf(s, "read events: %ld\n", cnt);
|
|
|
|
|
2014-11-14 20:49:41 +00:00
|
|
|
count = simple_read_from_buffer(ubuf, count, ppos,
|
|
|
|
s->buffer, trace_seq_used(s));
|
2009-04-29 22:03:45 +00:00
|
|
|
|
|
|
|
kfree(s);
|
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct file_operations tracing_stats_fops = {
|
2013-07-23 15:26:03 +00:00
|
|
|
.open = tracing_open_generic_tr,
|
2009-04-29 22:03:45 +00:00
|
|
|
.read = tracing_stats_read,
|
2010-07-07 21:40:11 +00:00
|
|
|
.llseek = generic_file_llseek,
|
2013-07-23 15:26:03 +00:00
|
|
|
.release = tracing_release_generic_tr,
|
2009-04-29 22:03:45 +00:00
|
|
|
};
|
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
#ifdef CONFIG_DYNAMIC_FTRACE
|
|
|
|
|
|
|
|
static ssize_t
|
2008-10-30 20:08:33 +00:00
|
|
|
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
|
2008-05-12 19:20:42 +00:00
|
|
|
size_t cnt, loff_t *ppos)
|
|
|
|
{
|
2019-10-01 18:38:07 +00:00
|
|
|
ssize_t ret;
|
|
|
|
char *buf;
|
2008-05-12 19:20:42 +00:00
|
|
|
int r;
|
|
|
|
|
2019-10-01 18:38:07 +00:00
|
|
|
/* 256 should be plenty to hold the amount needed */
|
|
|
|
buf = kmalloc(256, GFP_KERNEL);
|
|
|
|
if (!buf)
|
|
|
|
return -ENOMEM;
|
2008-10-30 20:08:33 +00:00
|
|
|
|
2019-10-01 18:38:07 +00:00
|
|
|
r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
|
|
|
|
ftrace_update_tot_cnt,
|
|
|
|
ftrace_number_of_pages,
|
|
|
|
ftrace_number_of_groups);
|
|
|
|
|
|
|
|
ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
|
|
|
|
kfree(buf);
|
|
|
|
return ret;
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
|
|
|
|
2009-03-06 02:44:55 +00:00
|
|
|
static const struct file_operations tracing_dyn_info_fops = {
|
2008-05-12 19:20:46 +00:00
|
|
|
.open = tracing_open_generic,
|
2008-10-30 20:08:33 +00:00
|
|
|
.read = tracing_read_dyn_info,
|
2010-07-07 21:40:11 +00:00
|
|
|
.llseek = generic_file_llseek,
|
2008-05-12 19:20:42 +00:00
|
|
|
};
|
2013-03-12 15:49:18 +00:00
|
|
|
#endif /* CONFIG_DYNAMIC_FTRACE */
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2013-03-12 15:49:18 +00:00
|
|
|
#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
|
|
|
|
static void
|
2017-04-03 22:18:47 +00:00
|
|
|
ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
|
2017-04-11 02:30:05 +00:00
|
|
|
struct trace_array *tr, struct ftrace_probe_ops *ops,
|
tracing/ftrace: Add a better way to pass data via the probe functions
With the redesign of the registration and execution of the function probes
(triggers), data can now be passed from the setup of the probe to the probe
callers that are specific to the trace_array it is on. Although all probes
still only affect the toplevel trace array, this change will allow
instances to have their own probes separated from other instances and the
top array.
That is, something like the stacktrace probe can be set to trace only in an
instance and not the toplevel trace array. This isn't implemented yet, but
this change sets the groundwork for it.
When a probe callback is triggered (someone writes the probe format into
set_ftrace_filter), it calls register_ftrace_function_probe() passing in
init_data that will be used to initialize the probe. Then for every matching
function, register_ftrace_function_probe() will call the probe_ops->init()
function with the init data that was passed to it, as well as an address to
a place holder that is associated with the probe and the instance. The first
occurrence will have a NULL in the pointer. The init() function will then
initialize it. If other probes are added, or more functions are part of the
probe, the place holder will be passed to the init() function with the place
holder data that it was initialized to the last time.
Then this place_holder is passed to each of the other probe_ops functions,
where it can be used in the function callback. When the probe_ops free()
function is called, it can be called either with the rip of the function
that is being removed from the probe, or zero, indicating that there are no
more functions attached to the probe, and the place holder is about to be
freed. This gives the probe_ops a way to free the data it assigned to the
place holder if it was allocated during the first init call.
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
2017-04-20 02:39:44 +00:00
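A condensed sketch of that init()/free() life cycle (the struct name and the
allocation policy are illustrative only; ftrace_snapshot_init() and
ftrace_snapshot_free() further down are the in-tree users of this contract):

struct my_probe_state {			/* hypothetical per-probe data */
	unsigned long			nr_ips;
};

/* Illustrative only: per-probe place holder handed back through **data. */
static int my_probe_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
			 unsigned long ip, void *init_data, void **data)
{
	struct my_probe_state *state = *data;

	if (!state) {				/* first matching function */
		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return -ENOMEM;
		*data = state;			/* reused by later init() calls */
	}

	/* record init_data (e.g. a count) for this ip in 'state' ... */
	return 0;
}

static void my_probe_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
			  unsigned long ip, void *data)
{
	struct my_probe_state *state = data;

	if (!ip) {			/* ip == 0: no functions left,  */
		kfree(state);		/* the place holder goes away   */
		return;
	}

	/* otherwise drop only what was recorded for 'ip' ... */
}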
|
|
|
void *data)
|
2013-03-12 15:49:18 +00:00
|
|
|
{
|
2017-04-20 15:34:06 +00:00
|
|
|
tracing_snapshot_instance(tr);
|
2013-03-12 15:49:18 +00:00
|
|
|
}
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2013-03-12 15:49:18 +00:00
|
|
|
static void
|
2017-04-03 22:18:47 +00:00
|
|
|
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
|
2017-04-11 02:30:05 +00:00
|
|
|
struct trace_array *tr, struct ftrace_probe_ops *ops,
|
2017-04-20 02:39:44 +00:00
|
|
|
void *data)
|
2008-05-12 19:20:42 +00:00
|
|
|
{
|
2017-04-20 02:39:44 +00:00
|
|
|
struct ftrace_func_mapper *mapper = data;
|
2017-04-04 02:09:43 +00:00
|
|
|
long *count = NULL;
|
2013-03-12 15:49:18 +00:00
|
|
|
|
2017-04-04 02:09:43 +00:00
|
|
|
if (mapper)
|
|
|
|
count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
|
|
|
|
|
|
|
|
if (count) {
|
|
|
|
|
|
|
|
if (*count <= 0)
|
|
|
|
return;
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2013-03-12 15:49:18 +00:00
|
|
|
(*count)--;
|
2017-04-04 02:09:43 +00:00
|
|
|
}
|
2013-03-12 15:49:18 +00:00
|
|
|
|
2017-04-20 15:34:06 +00:00
|
|
|
tracing_snapshot_instance(tr);
|
2013-03-12 15:49:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
|
|
|
|
struct ftrace_probe_ops *ops, void *data)
|
|
|
|
{
|
2017-04-20 02:39:44 +00:00
|
|
|
struct ftrace_func_mapper *mapper = data;
|
2017-04-04 02:09:43 +00:00
|
|
|
long *count = NULL;
|
2013-03-12 15:49:18 +00:00
|
|
|
|
|
|
|
seq_printf(m, "%ps:", (void *)ip);
|
|
|
|
|
2014-11-08 20:42:10 +00:00
|
|
|
seq_puts(m, "snapshot");
|
2013-03-12 15:49:18 +00:00
|
|
|
|
2017-04-04 02:09:43 +00:00
|
|
|
if (mapper)
|
|
|
|
count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
|
|
|
|
|
|
|
|
if (count)
|
|
|
|
seq_printf(m, ":count=%ld\n", *count);
|
2013-03-12 15:49:18 +00:00
|
|
|
else
|
2017-04-04 02:09:43 +00:00
|
|
|
seq_puts(m, ":unlimited\n");
|
2013-03-12 15:49:18 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-04-04 02:09:43 +00:00
|
|
|
static int
|
2017-04-11 02:30:05 +00:00
|
|
|
ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
|
2017-04-20 02:39:44 +00:00
|
|
|
unsigned long ip, void *init_data, void **data)
|
2017-04-04 02:09:43 +00:00
|
|
|
{
|
2017-04-20 02:39:44 +00:00
|
|
|
struct ftrace_func_mapper *mapper = *data;
|
|
|
|
|
|
|
|
if (!mapper) {
|
|
|
|
mapper = allocate_ftrace_func_mapper();
|
|
|
|
if (!mapper)
|
|
|
|
return -ENOMEM;
|
|
|
|
*data = mapper;
|
|
|
|
}
|
2017-04-04 02:09:43 +00:00
|
|
|
|
2017-04-20 02:39:44 +00:00
|
|
|
return ftrace_func_mapper_add_ip(mapper, ip, init_data);
|
2017-04-04 02:09:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2017-04-11 02:30:05 +00:00
|
|
|
ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
|
2017-04-20 02:39:44 +00:00
|
|
|
unsigned long ip, void *data)
|
2017-04-04 02:09:43 +00:00
|
|
|
{
|
2017-04-20 02:39:44 +00:00
|
|
|
struct ftrace_func_mapper *mapper = data;
|
|
|
|
|
|
|
|
if (!ip) {
|
|
|
|
if (!mapper)
|
|
|
|
return;
|
|
|
|
free_ftrace_func_mapper(mapper, NULL);
|
|
|
|
return;
|
|
|
|
}
|
2017-04-04 02:09:43 +00:00
|
|
|
|
|
|
|
ftrace_func_mapper_remove_ip(mapper, ip);
|
|
|
|
}
|
|
|
|
|
2013-03-12 15:49:18 +00:00
|
|
|
static struct ftrace_probe_ops snapshot_probe_ops = {
|
|
|
|
.func = ftrace_snapshot,
|
|
|
|
.print = ftrace_snapshot_print,
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct ftrace_probe_ops snapshot_count_probe_ops = {
|
|
|
|
.func = ftrace_count_snapshot,
|
|
|
|
.print = ftrace_snapshot_print,
|
2017-04-04 02:09:43 +00:00
|
|
|
.init = ftrace_snapshot_init,
|
|
|
|
.free = ftrace_snapshot_free,
|
2013-03-12 15:49:18 +00:00
|
|
|
};
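For illustration only (not part of this file): the data-passing scheme described in the probe commit message above can be exercised by a probe_ops that keeps its own per-instance state in the *data place holder. The sketch below reuses the init()/free() signatures seen in ftrace_snapshot_free() above; the my_probe_* names and my_probe_data structure are hypothetical, and <linux/slab.h> is assumed for kzalloc()/kfree().

/* Hypothetical sketch: per-instance state threaded through the place holder. */
struct my_probe_data {
	unsigned long		hits;
};

static int
my_probe_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
	      unsigned long ip, void *init_data, void **data)
{
	struct my_probe_data *d = *data;

	/* First matching function for this probe and instance: allocate state */
	if (!d) {
		d = kzalloc(sizeof(*d), GFP_KERNEL);
		if (!d)
			return -ENOMEM;
		*data = d;
	}
	return 0;
}

static void
my_probe_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
	      unsigned long ip, void *data)
{
	/* ip == 0: the last function was removed, the place holder goes away */
	if (!ip)
		kfree(data);
}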
|
|
|
|
|
|
|
|
static int
|
2017-04-05 17:12:55 +00:00
|
|
|
ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
|
2013-03-12 15:49:18 +00:00
|
|
|
char *glob, char *cmd, char *param, int enable)
|
|
|
|
{
|
|
|
|
struct ftrace_probe_ops *ops;
|
|
|
|
void *count = (void *)-1;
|
|
|
|
char *number;
|
|
|
|
int ret;
|
|
|
|
|
2017-06-29 14:05:45 +00:00
|
|
|
if (!tr)
|
|
|
|
return -ENODEV;
|
|
|
|
|
2013-03-12 15:49:18 +00:00
|
|
|
/* hash funcs only work with set_ftrace_filter */
|
|
|
|
if (!enable)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
|
|
|
|
|
2017-04-04 20:44:43 +00:00
|
|
|
if (glob[0] == '!')
|
2017-04-18 18:50:39 +00:00
|
|
|
return unregister_ftrace_function_probe_func(glob+1, tr, ops);
|
2013-03-12 15:49:18 +00:00
|
|
|
|
|
|
|
if (!param)
|
|
|
|
goto out_reg;
|
|
|
|
|
|
|
|
number = strsep(¶m, ":");
|
|
|
|
|
|
|
|
if (!strlen(number))
|
|
|
|
goto out_reg;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We use the callback data field (which is a pointer)
|
|
|
|
* as our counter.
|
|
|
|
*/
|
|
|
|
ret = kstrtoul(number, 0, (unsigned long *)&count);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
out_reg:
|
2018-05-28 14:56:36 +00:00
|
|
|
ret = tracing_alloc_snapshot_instance(tr);
|
2017-04-19 16:07:08 +00:00
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
2013-03-12 15:49:18 +00:00
|
|
|
|
2017-05-04 01:41:21 +00:00
|
|
|
ret = register_ftrace_function_probe(glob, tr, ops, count);
|
2013-03-12 15:49:18 +00:00
|
|
|
|
2017-04-19 16:07:08 +00:00
|
|
|
out:
|
2013-03-12 15:49:18 +00:00
|
|
|
return ret < 0 ? ret : 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct ftrace_func_command ftrace_snapshot_cmd = {
|
|
|
|
.name = "snapshot",
|
|
|
|
.func = ftrace_trace_snapshot_callback,
|
|
|
|
};
|
|
|
|
|
2013-10-24 13:34:18 +00:00
|
|
|
static __init int register_snapshot_cmd(void)
|
2013-03-12 15:49:18 +00:00
|
|
|
{
|
|
|
|
return register_ftrace_command(&ftrace_snapshot_cmd);
|
|
|
|
}
|
|
|
|
#else
|
2013-10-24 13:34:18 +00:00
|
|
|
static inline __init int register_snapshot_cmd(void) { return 0; }
|
2013-03-12 15:49:18 +00:00
|
|
|
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
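As a usage sketch (not from this file): the "snapshot" command registered above is driven by writes to set_ftrace_filter of the form <function>:snapshot[:<count>], with the optional count parsed by the kstrtoul() call in ftrace_trace_snapshot_callback(), and a leading '!' unregistering the probe. A minimal userspace caller might look like the following; the tracefs mount point is an assumption.

/* Hypothetical userspace sketch: snapshot once when schedule() is hit.
 * Assumes tracefs is mounted at /sys/kernel/tracing. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int arm_snapshot_probe(void)
{
	const char *cmd = "schedule:snapshot:1\n";
	int fd = open("/sys/kernel/tracing/set_ftrace_filter", O_WRONLY);
	int ret = -1;

	if (fd < 0)
		return -1;
	if (write(fd, cmd, strlen(cmd)) == (ssize_t)strlen(cmd))
		ret = 0;
	close(fd);
	return ret;
}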
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2015-01-27 02:00:48 +00:00
|
|
|
static struct dentry *tracing_get_dentry(struct trace_array *tr)
|
2008-05-12 19:20:42 +00:00
|
|
|
{
|
2015-01-20 17:13:40 +00:00
|
|
|
if (WARN_ON(!tr->dir))
|
|
|
|
return ERR_PTR(-ENODEV);
|
|
|
|
|
|
|
|
/* Top directory uses NULL as the parent */
|
|
|
|
if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/* All sub buffers have a descriptor */
|
2012-05-11 17:29:49 +00:00
|
|
|
return tr->dir;
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
|
|
|
|
2012-05-11 17:29:49 +00:00
|
|
|
static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
|
tracing/core: introduce per cpu tracing files
Impact: split up tracing output per cpu
Currently, on the tracing debugfs directory, three files are
available to the user to let him extract the trace output:
- trace is an iterator through the ring-buffer. It's a reader
but not a consumer. It doesn't block when no more traces are
available.
- latency_trace is pretty similar to the former, except that it
adds more information such as preempt count, irq flags, ...
- trace_pipe is a reader and a consumer, it will also block
waiting for traces if necessary (heh, yes it's a pipe).
The traces coming from different cpus are currently mixed up
inside these files. Sometimes it messes up the information,
sometimes it's useful, depending on what the tracer captures.
The tracing_cpumask file is useful to filter the output and
select only the traces captured by a custom defined set of cpus.
But it is still not powerful enough to extract one trace buffer
per cpu at the same time.
So this patch creates a new directory: /debug/tracing/per_cpu/.
Inside this directory, you will now find one trace_pipe file and
one trace file per cpu.
Which means if you have two cpus, you will have:
trace0
trace1
trace_pipe0
trace_pipe1
And of course, reading these files will have the same effect
as with the usual tracing files, except that you will only see
the traces from the given cpu.
The original all-in-one cpu trace files are still available in
their original place.
Until now, only one consumer was allowed on trace_pipe to avoid
racy consuming on the ring-buffer. Now the approach has changed a
bit: you can have only one consumer per cpu.
Which means you are allowed to read trace_pipe0 and trace_pipe1
concurrently, but you can't have two readers on trace_pipe0 or
trace_pipe1.
Following the same logic, if there is one reader on the common
trace_pipe, you can not have at the same time another reader on
trace_pipe0 or trace_pipe1, because trace_pipe is in essence
already a consumer of all cpu buffers.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-02-25 02:22:28 +00:00
|
|
|
{
|
|
|
|
struct dentry *d_tracer;
|
|
|
|
|
2012-05-11 17:29:49 +00:00
|
|
|
if (tr->percpu_dir)
|
|
|
|
return tr->percpu_dir;
|
tracing/core: introduce per cpu tracing files
2009-02-25 02:22:28 +00:00
|
|
|
|
2015-01-27 02:00:48 +00:00
|
|
|
d_tracer = tracing_get_dentry(tr);
|
2015-01-20 16:14:16 +00:00
|
|
|
if (IS_ERR(d_tracer))
|
tracing/core: introduce per cpu tracing files
2009-02-25 02:22:28 +00:00
|
|
|
return NULL;
|
|
|
|
|
2015-01-20 17:13:40 +00:00
|
|
|
tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
|
tracing/core: introduce per cpu tracing files
2009-02-25 02:22:28 +00:00
|
|
|
|
2020-01-25 15:52:30 +00:00
|
|
|
MEM_FAIL(!tr->percpu_dir,
|
2015-01-20 17:13:40 +00:00
|
|
|
"Could not create tracefs directory 'per_cpu/%d'\n", cpu);
|
tracing/core: introduce per cpu tracing files
2009-02-25 02:22:28 +00:00
|
|
|
|
2012-05-11 17:29:49 +00:00
|
|
|
return tr->percpu_dir;
|
tracing/core: introduce per cpu tracing files
2009-02-25 02:22:28 +00:00
|
|
|
}
|
|
|
|
|
tracing: Introduce trace_create_cpu_file() and tracing_get_cpu()
Every "file_operations" used by tracing_init_debugfs_percpu is buggy.
f_op->open/etc does:
1. struct trace_cpu *tc = inode->i_private;
   struct trace_array *tr = tc->tr;
2. trace_array_get(tr) or fail;
3. do_something(tc);
But tc (and tr) can already be freed before trace_array_get() is called.
And it doesn't matter whether this file is per-cpu or it was created by
init_tracer_debugfs(); free_percpu() or kfree() are equally bad.
Note that even 1. is not safe: the freed memory can be unmapped. But even
if it were safe, trace_array_get() can wrongly succeed if we also race with
the next new_instance_create() which can re-allocate the same tr, or if tc
was overwritten and ->tr points to a valid tr. In this case 3. uses the
freed/reused memory.
Add the new trivial helper, trace_create_cpu_file(), which simply calls
trace_create_file() and encodes "cpu" in "struct inode". Another helper,
tracing_get_cpu(), will be used to read cpu_nr-or-RING_BUFFER_ALL_CPUS.
The patch abuses ->i_cdev to encode the number; it is never used unless
the file is S_ISCHR(). But we could use something else, say, i_bytes or
even ->d_fsdata. In any case this hack is hidden inside these 2 helpers,
and it would be trivial to change them if needed.
This patch only changes tracing_init_debugfs_percpu() to use the new
trace_create_cpu_file(); the next patches will change file_operations.
Note: tracing_get_cpu(inode) is always safe, but you can't trust the
result unless trace_array_get() was called; without trace_types_lock,
which acts as a barrier, it can wrongly return RING_BUFFER_ALL_CPUS.
Link: http://lkml.kernel.org/r/20130723152554.GA23710@redhat.com
Cc: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2013-07-23 15:25:54 +00:00
|
|
|
static struct dentry *
|
|
|
|
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
|
|
|
|
void *data, long cpu, const struct file_operations *fops)
|
|
|
|
{
|
|
|
|
struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
|
|
|
|
|
|
|
|
if (ret) /* See tracing_get_cpu() */
|
2015-03-17 22:26:16 +00:00
|
|
|
d_inode(ret)->i_cdev = (void *)(cpu + 1);
|
tracing: Introduce trace_create_cpu_file() and tracing_get_cpu()
2013-07-23 15:25:54 +00:00
|
|
|
return ret;
|
|
|
|
}
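The decoder that the /* See tracing_get_cpu() */ comment above refers to lives in trace.h in the upstream tree; a sketch of it, assuming the cpu + 1 encoding used here (0 meaning "no per-cpu number", i.e. all CPUs):

/* Sketch of the counterpart helper (defined in trace.h upstream). */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}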
|
|
|
|
|
2012-05-11 17:29:49 +00:00
|
|
|
static void
|
2015-01-20 17:13:40 +00:00
|
|
|
tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
|
tracing/core: introduce per cpu tracing files
2009-02-25 02:22:28 +00:00
|
|
|
{
|
2012-05-11 17:29:49 +00:00
|
|
|
struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
|
2009-03-26 23:25:38 +00:00
|
|
|
struct dentry *d_cpu;
|
2010-10-21 01:51:26 +00:00
|
|
|
char cpu_dir[30]; /* 30 characters should be more than enough */
|
tracing/core: introduce per cpu tracing files
2009-02-25 02:22:28 +00:00
|
|
|
|
2012-04-23 01:11:57 +00:00
|
|
|
if (!d_percpu)
|
|
|
|
return;
|
|
|
|
|
2010-10-21 01:51:26 +00:00
|
|
|
snprintf(cpu_dir, 30, "cpu%ld", cpu);
|
2015-01-20 17:13:40 +00:00
|
|
|
d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
|
2009-02-25 23:41:38 +00:00
|
|
|
if (!d_cpu) {
|
2016-03-22 21:28:09 +00:00
|
|
|
pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
|
2009-02-25 23:41:38 +00:00
|
|
|
return;
|
|
|
|
}
|
tracing/core: introduce per cpu tracing files
2009-02-25 02:22:28 +00:00
|
|
|
|
2009-02-25 23:41:38 +00:00
|
|
|
/* per cpu trace_pipe */
|
tracing: Introduce trace_create_cpu_file() and tracing_get_cpu()
2013-07-23 15:25:54 +00:00
|
|
|
trace_create_cpu_file("trace_pipe", 0444, d_cpu,
|
2013-07-23 15:25:57 +00:00
|
|
|
tr, cpu, &tracing_pipe_fops);
|
tracing/core: introduce per cpu tracing files
2009-02-25 02:22:28 +00:00
|
|
|
|
|
|
|
/* per cpu trace */
|
tracing: Introduce trace_create_cpu_file() and tracing_get_cpu()
2013-07-23 15:25:54 +00:00
|
|
|
trace_create_cpu_file("trace", 0644, d_cpu,
|
2013-07-23 15:26:10 +00:00
|
|
|
tr, cpu, &tracing_fops);
|
2009-03-13 04:37:42 +00:00
|
|
|
|
tracing: Introduce trace_create_cpu_file() and tracing_get_cpu()
2013-07-23 15:25:54 +00:00
|
|
|
trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
|
2013-07-23 15:26:00 +00:00
|
|
|
tr, cpu, &tracing_buffers_fops);
|
2009-03-13 04:37:42 +00:00
|
|
|
|
tracing: Introduce trace_create_cpu_file() and tracing_get_cpu()
2013-07-23 15:25:54 +00:00
|
|
|
trace_create_cpu_file("stats", 0444, d_cpu,
|
2013-07-23 15:26:03 +00:00
|
|
|
tr, cpu, &tracing_stats_fops);
|
2012-02-02 20:00:41 +00:00
|
|
|
|
tracing: Introduce trace_create_cpu_file() and tracing_get_cpu()
2013-07-23 15:25:54 +00:00
|
|
|
trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
|
2013-07-23 15:26:06 +00:00
|
|
|
tr, cpu, &tracing_entries_fops);
|
2013-03-05 19:35:11 +00:00
|
|
|
|
|
|
|
#ifdef CONFIG_TRACER_SNAPSHOT
|
tracing: Introduce trace_create_cpu_file() and tracing_get_cpu()
2013-07-23 15:25:54 +00:00
|
|
|
trace_create_cpu_file("snapshot", 0644, d_cpu,
|
2013-07-23 15:26:10 +00:00
|
|
|
tr, cpu, &snapshot_fops);
|
2013-03-05 21:18:16 +00:00
|
|
|
|
tracing: Introduce trace_create_cpu_file() and tracing_get_cpu()
2013-07-23 15:25:54 +00:00
|
|
|
trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
|
2013-07-23 15:26:00 +00:00
|
|
|
tr, cpu, &snapshot_raw_fops);
|
2013-03-05 19:35:11 +00:00
|
|
|
#endif
|
tracing/core: introduce per cpu tracing files
2009-02-25 02:22:28 +00:00
|
|
|
}
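To show what the per-cpu layout created above buys a reader (a hedged userspace sketch; the tracefs mount point and the choice of cpu0 are assumptions): each per_cpu/cpuN directory exposes its own trace_pipe, which can be consumed independently of the other CPUs, subject to the one-consumer-per-cpu rule from the per-cpu commit message earlier.

/* Hypothetical sketch: consume CPU 0's buffer through its per-cpu pipe.
 * Assumes tracefs is mounted at /sys/kernel/tracing. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int drain_cpu0_pipe(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe", O_RDONLY);

	if (fd < 0)
		return -1;
	/* read() blocks until CPU 0 produces events, then consumes them */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}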
|
|
|
|
|
2008-05-12 19:20:44 +00:00
|
|
|
#ifdef CONFIG_FTRACE_SELFTEST
|
|
|
|
/* Let selftest have access to static functions in this file */
|
|
|
|
#include "trace_selftest.c"
|
|
|
|
#endif
|
|
|
|
|
2009-02-27 04:43:05 +00:00
|
|
|
static ssize_t
|
|
|
|
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
|
|
|
|
loff_t *ppos)
|
|
|
|
{
|
|
|
|
struct trace_option_dentry *topt = filp->private_data;
|
|
|
|
char *buf;
|
|
|
|
|
|
|
|
if (topt->flags->val & topt->opt->bit)
|
|
|
|
buf = "1\n";
|
|
|
|
else
|
|
|
|
buf = "0\n";
|
|
|
|
|
|
|
|
return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
|
|
|
|
loff_t *ppos)
|
|
|
|
{
|
|
|
|
struct trace_option_dentry *topt = filp->private_data;
|
|
|
|
unsigned long val;
|
|
|
|
int ret;
|
|
|
|
|
2011-06-07 19:58:27 +00:00
|
|
|
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
|
|
|
|
if (ret)
|
2009-02-27 04:43:05 +00:00
|
|
|
return ret;
|
|
|
|
|
2009-12-08 03:17:06 +00:00
|
|
|
if (val != 0 && val != 1)
|
|
|
|
return -EINVAL;
|
2009-02-27 04:43:05 +00:00
|
|
|
|
2009-12-08 03:17:06 +00:00
|
|
|
if (!!(topt->flags->val & topt->opt->bit) != val) {
|
2009-02-27 04:43:05 +00:00
|
|
|
mutex_lock(&trace_types_lock);
|
2014-01-10 16:13:54 +00:00
|
|
|
ret = __set_tracer_option(topt->tr, topt->flags,
|
2009-12-22 03:35:16 +00:00
|
|
|
topt->opt, !val);
|
2009-02-27 04:43:05 +00:00
|
|
|
mutex_unlock(&trace_types_lock);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
*ppos += cnt;
|
|
|
|
|
|
|
|
return cnt;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static const struct file_operations trace_options_fops = {
|
|
|
|
.open = tracing_open_generic,
|
|
|
|
.read = trace_options_read,
|
|
|
|
.write = trace_options_write,
|
2010-07-07 21:40:11 +00:00
|
|
|
.llseek = generic_file_llseek,
|
2009-02-27 04:43:05 +00:00
|
|
|
};
|
|
|
|
|
2015-09-30 15:11:15 +00:00
|
|
|
/*
|
|
|
|
* In order to pass in both the trace_array descriptor as well as the index
|
|
|
|
* to the flag that the trace option file represents, the trace_array
|
|
|
|
* has a character array of trace_flags_index[], which holds the index
|
|
|
|
* of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
|
|
|
|
* The address of this character array is passed to the flag option file
|
|
|
|
* read/write callbacks.
|
|
|
|
*
|
|
|
|
* In order to extract both the index and the trace_array descriptor,
|
|
|
|
* get_tr_index() uses the following algorithm.
|
|
|
|
*
|
|
|
|
* idx = *ptr;
|
|
|
|
*
|
|
|
|
* As the pointer itself contains the address of the index (remember
|
|
|
|
* index[1] == 1).
|
|
|
|
*
|
|
|
|
* Then to get the trace_array descriptor, by subtracting that index
|
|
|
|
* from the ptr, we get to the start of the index itself.
|
|
|
|
*
|
|
|
|
* ptr - idx == &index[0]
|
|
|
|
*
|
|
|
|
* Then a simple container_of() from that pointer gets us to the
|
|
|
|
* trace_array descriptor.
|
|
|
|
*/
|
|
|
|
static void get_tr_index(void *data, struct trace_array **ptr,
|
|
|
|
unsigned int *pindex)
|
|
|
|
{
|
|
|
|
*pindex = *(unsigned char *)data;
|
|
|
|
|
|
|
|
*ptr = container_of(data - *pindex, struct trace_array,
|
|
|
|
trace_flags_index);
|
|
|
|
}
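To make the pointer arithmetic in the comment above concrete (a sketch; the real call site is create_trace_option_core_file(), which begins further down but is truncated in this excerpt): the per-flag option file is created with a pointer into tr->trace_flags_index[], so dereferencing the data pointer yields the bit index, and subtracting that index points back at index[0] inside the trace_array.

/* Sketch of how the data pointer handed to trace_options_core_fops is built:
 * *data == index, and data - index == &tr->trace_flags_index[0]. */
void *data = &tr->trace_flags_index[index];
trace_create_file(option, 0644, t_options, data, &trace_options_core_fops);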
|
|
|
|
|
2009-02-27 03:19:12 +00:00
|
|
|
static ssize_t
|
|
|
|
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
|
|
|
|
loff_t *ppos)
|
|
|
|
{
|
2015-09-30 15:11:15 +00:00
|
|
|
void *tr_index = filp->private_data;
|
|
|
|
struct trace_array *tr;
|
|
|
|
unsigned int index;
|
2009-02-27 03:19:12 +00:00
|
|
|
char *buf;
|
|
|
|
|
2015-09-30 15:11:15 +00:00
|
|
|
get_tr_index(tr_index, &tr, &index);
|
|
|
|
|
|
|
|
if (tr->trace_flags & (1 << index))
|
2009-02-27 03:19:12 +00:00
|
|
|
buf = "1\n";
|
|
|
|
else
|
|
|
|
buf = "0\n";
|
|
|
|
|
|
|
|
return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
|
|
|
|
loff_t *ppos)
|
|
|
|
{
|
2015-09-30 15:11:15 +00:00
|
|
|
void *tr_index = filp->private_data;
|
|
|
|
struct trace_array *tr;
|
|
|
|
unsigned int index;
|
2009-02-27 03:19:12 +00:00
|
|
|
unsigned long val;
|
|
|
|
int ret;
|
|
|
|
|
2015-09-30 15:11:15 +00:00
|
|
|
get_tr_index(tr_index, &tr, &index);
|
|
|
|
|
2011-06-07 19:58:27 +00:00
|
|
|
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
|
|
|
|
if (ret)
|
2009-02-27 03:19:12 +00:00
|
|
|
return ret;
|
|
|
|
|
2009-08-07 10:55:48 +00:00
|
|
|
if (val != 0 && val != 1)
|
2009-02-27 03:19:12 +00:00
|
|
|
return -EINVAL;
|
2013-03-14 17:50:56 +00:00
|
|
|
|
2019-12-10 09:15:16 +00:00
|
|
|
mutex_lock(&event_mutex);
|
2013-03-14 17:50:56 +00:00
|
|
|
mutex_lock(&trace_types_lock);
|
2012-05-11 17:29:49 +00:00
|
|
|
ret = set_tracer_flag(tr, 1 << index, val);
|
2013-03-14 17:50:56 +00:00
|
|
|
mutex_unlock(&trace_types_lock);
|
2019-12-10 09:15:16 +00:00
|
|
|
mutex_unlock(&event_mutex);
|
2009-02-27 03:19:12 +00:00
|
|
|
|
2013-03-14 19:03:53 +00:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
2009-02-27 03:19:12 +00:00
|
|
|
*ppos += cnt;
|
|
|
|
|
|
|
|
return cnt;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct file_operations trace_options_core_fops = {
|
|
|
|
.open = tracing_open_generic,
|
|
|
|
.read = trace_options_core_read,
|
|
|
|
.write = trace_options_core_write,
|
2010-07-07 21:40:11 +00:00
|
|
|
.llseek = generic_file_llseek,
|
2009-02-27 03:19:12 +00:00
|
|
|
};
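These fops back the per-flag files under the options/ directory created below by trace_options_init_dentry(); as the write handler above shows, only the values 0 and 1 are accepted. A hedged userspace sketch follows (the mount point and the "sym-offset" option name are just examples):

/* Hypothetical sketch: toggle a core trace option from userspace.
 * Assumes tracefs is mounted at /sys/kernel/tracing. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int set_trace_option(const char *name, int on)
{
	char path[256];
	int fd, ret = -1;

	snprintf(path, sizeof(path), "/sys/kernel/tracing/options/%s", name);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, on ? "1" : "0", 1) == 1)
		ret = 0;
	close(fd);
	return ret;
}

/* e.g. set_trace_option("sym-offset", 1); */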
|
|
|
|
|
2009-03-26 23:25:38 +00:00
|
|
|
struct dentry *trace_create_file(const char *name,
|
2011-07-24 08:33:43 +00:00
|
|
|
umode_t mode,
|
2009-03-26 23:25:38 +00:00
|
|
|
struct dentry *parent,
|
|
|
|
void *data,
|
|
|
|
const struct file_operations *fops)
|
|
|
|
{
|
|
|
|
struct dentry *ret;
|
|
|
|
|
2015-01-20 17:13:40 +00:00
|
|
|
ret = tracefs_create_file(name, mode, parent, data, fops);
|
2009-03-26 23:25:38 +00:00
|
|
|
if (!ret)
|
2016-03-22 21:28:09 +00:00
|
|
|
pr_warn("Could not create tracefs '%s' entry\n", name);
|
2009-03-26 23:25:38 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}

static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = tracefs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warn("Could not create tracefs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);

}

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct trace_options *tr_topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;
	int i;

	if (!tracer)
		return;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return;

	/*
	 * If this is an instance, only create flags for tracers
	 * the instance may have.
	 */
	if (!trace_ok_for_array(tracer, tr))
		return;

	for (i = 0; i < tr->nr_topts; i++) {
		/* Make sure there's no duplicate flags. */
		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
			return;
	}

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return;

	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
			    GFP_KERNEL);
	if (!tr_topts) {
		kfree(topts);
		return;
	}

	tr->topts = tr_topts;
	tr->topts[tr->nr_topts].tracer = tracer;
	tr->topts[tr->nr_topts].topts = topts;
	tr->nr_topts++;

	for (cnt = 0; opts[cnt].name; cnt++) {
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);
		MEM_FAIL(topts[cnt].entry == NULL,
			 "Failed to create trace option: %s",
			 opts[cnt].name);
	}
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options,
				 (void *)&tr->trace_flags_index[index],
				 &trace_options_core_fops);
}

static void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	bool top_level = tr == &global_trace;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++) {
		if (top_level ||
		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
			create_trace_option_core_file(tr, trace_options[i], i);
	}
}

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (!!val == tracer_tracing_is_on(tr)) {
			val = 0; /* do nothing */
		} else if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
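
/*
 * Usage sketch (illustrative, not in the original file): these fops back
 * the per-instance "tracing_on" file, so writes from user space such as
 *
 *	echo 0 > /sys/kernel/tracing/tracing_on
 *	echo 1 > /sys/kernel/tracing/tracing_on
 *
 * end up in rb_simple_write() above.  Writing the value that is already
 * set is deliberately treated as a no-op.
 */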

static ssize_t
buffer_percent_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tr->buffer_percent;
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
buffer_percent_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val > 100)
		return -EINVAL;

	if (!val)
		val = 1;

	tr->buffer_percent = val;

	(*ppos)++;

	return cnt;
}

static const struct file_operations buffer_percent_fops = {
	.open		= tracing_open_generic_tr,
	.read		= buffer_percent_read,
	.write		= buffer_percent_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
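
/*
 * Usage sketch (illustrative, not in the original file): these fops back
 * the per-instance "buffer_percent" file (default 50, set below in
 * init_tracer_tracefs()).  The value is the fill level, in percent, that
 * blocked readers of the ring buffer wait for before being woken, e.g.
 *
 *	echo 100 > /sys/kernel/tracing/buffer_percent
 *
 * The precise wakeup behaviour is implemented in the ring-buffer code,
 * not here.
 */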

static struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->array_buffer,
			   ring_buffer_size(tr->array_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
		ring_buffer_free(tr->array_buffer.buffer);
		tr->array_buffer.buffer = NULL;
		free_percpu(tr->array_buffer.data);
		tr->array_buffer.data = NULL;
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif

	return 0;
}

static void free_trace_buffer(struct array_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->array_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

static void init_trace_flags_index(struct trace_array *tr)
{
	int i;

	/* Used by the trace options files */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
		tr->trace_flags_index[i] = i;
}

static void __update_tracer_options(struct trace_array *tr)
{
	struct tracer *t;

	for (t = trace_types; t; t = t->next)
		add_tracer_options(tr, t);
}

static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}

/* Must have trace_types_lock held */
struct trace_array *trace_array_find(const char *instance)
{
	struct trace_array *tr, *found = NULL;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, instance) == 0) {
			found = tr;
			break;
		}
	}

	return found;
}

struct trace_array *trace_array_find_get(const char *instance)
{
	struct trace_array *tr;

	mutex_lock(&trace_types_lock);
	tr = trace_array_find(instance);
	if (tr)
		tr->ref++;
	mutex_unlock(&trace_types_lock);

	return tr;
}

static struct trace_array *trace_array_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		return ERR_PTR(ret);

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);
	INIT_LIST_HEAD(&tr->hist_vars);
	INIT_LIST_HEAD(&tr->err_log);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove(tr->dir);
		goto out_free_tr;
	}

	ftrace_init_trace_array(tr);

	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	__update_tracer_options(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	tr->ref++;

	return tr;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

	return ERR_PTR(ret);
}

static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	if (trace_array_find(name))
		goto out_unlock;

	tr = trace_array_create(name);

	ret = PTR_ERR_OR_ZERO(tr);

out_unlock:
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);
	return ret;
}

/**
 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
 * @name: The name of the trace array to be looked up/created.
 *
 * Returns pointer to trace array with given name.
 * NULL, if it cannot be created.
 *
 * NOTE: This function increments the reference counter associated with the
 * trace array returned. This makes sure it cannot be freed while in use.
 * Use trace_array_put() once the trace array is no longer needed.
 * If the trace_array is to be freed, trace_array_destroy() needs to
 * be called after the trace_array_put(), or simply let user space delete
 * it from the tracefs instances directory. But until the
 * trace_array_put() is called, user space can not delete it.
 *
 */
struct trace_array *trace_array_get_by_name(const char *name)
{
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	tr = trace_array_create(name);

	if (IS_ERR(tr))
		tr = NULL;
out_unlock:
	if (tr)
		tr->ref++;

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);
	return tr;
}
EXPORT_SYMBOL_GPL(trace_array_get_by_name);
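
/*
 * Illustrative sketch (not in the original file): a module that wants its
 * own tracing instance could pair the helper above with trace_array_put()
 * and trace_array_destroy().  The instance name "my_instance" is just a
 * placeholder:
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (!tr)
 *		return -ENODEV;
 *
 *	(... use the instance ...)
 *
 *	trace_array_put(tr);
 *	trace_array_destroy(tr);	(only if the instance should go away)
 */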

static int __remove_instance(struct trace_array *tr)
{
	int i;

	/* Reference counter for a newly created trace array = 1. */
	if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
		return -EBUSY;

	list_del(&tr->list);

	/* Disable all the flags that were enabled coming in */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
		if ((1 << i) & ZEROED_TRACE_FLAGS)
			set_tracer_flag(tr, 1 << i, 0);
	}

	tracing_set_nop(tr);
	clear_ftrace_function_probes(tr);
	event_trace_del_tracer(tr);
	ftrace_clear_pids(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove(tr->dir);
	free_trace_buffers(tr);

	for (i = 0; i < tr->nr_topts; i++) {
		kfree(tr->topts[i].topts);
	}
	kfree(tr->topts);

	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);
	tr = NULL;

	return 0;
}

int trace_array_destroy(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret;

	if (!this_tr)
		return -EINVAL;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -ENODEV;

	/* Making sure trace array exists before destroying it. */
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			ret = __remove_instance(tr);
			break;
		}
	}

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_array_destroy);

static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	tr = trace_array_find(name);
	if (tr)
		ret = __remove_instance(tr);

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ret;
}

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
		return;
}
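
/*
 * Usage sketch (illustrative, not in the original file): the instances
 * directory registered above lets user space create and remove trace
 * arrays directly:
 *
 *	mkdir /sys/kernel/tracing/instances/foo
 *	rmdir /sys/kernel/tracing/instances/foo
 *
 * The mkdir lands in instance_mkdir() and the rmdir in instance_rmdir().
 */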

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	struct trace_event_file *file;
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	file = __find_event_file(tr, "ftrace", "print");
	if (file && file->dir)
		trace_create_file("trigger", 0644, file->dir, file,
				  &event_trigger_fops);
	tr->trace_marker_file = file;

	trace_create_file("trace_marker_raw", 0220, d_tracer,
			  tr, &tracing_mark_raw_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	trace_create_file("timestamp_mode", 0444, d_tracer, tr,
			  &trace_time_stamp_mode_fops);

	tr->buffer_percent = 50;

	trace_create_file("buffer_percent", 0444, d_tracer,
			  tr, &buffer_percent_fops);

	create_trace_options_dir(tr);

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	trace_create_maxlat_file(tr, d_tracer);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		MEM_FAIL(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	trace_create_file("error_log", 0644, d_tracer,
			  tr, &tracing_err_log_fops);

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

	ftrace_init_tracefs(tr, d_tracer);
}

static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}
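
/*
 * Behavioural sketch (illustrative, not in the original file): once the
 * callback above is registered as a debugfs automount point (see
 * tracing_init_dentry() below), legacy tooling that only knows about
 * debugfs keeps working:
 *
 *	mount -t debugfs nodev /sys/kernel/debug
 *	ls /sys/kernel/debug/tracing
 *
 * Stepping into debugfs/tracing transparently mounts tracefs there.
 */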

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
int tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return 0;

	if (WARN_ON(!tracefs_initialized()))
		return -ENODEV;

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);

	return 0;
}

extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];

static void __init trace_eval_init(void)
{
	int len;

	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
}

#ifdef CONFIG_MODULES
static void trace_module_add_evals(struct module *mod)
{
	if (!mod->num_trace_evals)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
}

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static void trace_module_remove_evals(struct module *mod)
{
	union trace_eval_map_item *map;
	union trace_eval_map_item **last = &trace_eval_maps;

	if (!mod->num_trace_evals)
		return;

	mutex_lock(&trace_eval_mutex);

	map = trace_eval_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_eval_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_eval_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_eval_mutex);
}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_evals(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_evals(mod);
		break;
	}

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */

static __init int tracer_init_tracefs(void)
{
	int ret;

	trace_access_lock_init();

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	event_trace_init();

	init_tracer_tracefs(&global_trace, NULL);
	ftrace_init_tracefs_toplevel(&global_trace, NULL);

	trace_create_file("tracing_thresh", 0644, NULL,
			&global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, NULL,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, NULL,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, NULL,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", 0444, NULL,
			NULL, &tracing_saved_tgids_fops);

	trace_eval_init();

	trace_create_eval_file(NULL);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, NULL,
			NULL, &tracing_dyn_info_fops);
#endif

	create_trace_instances(NULL);

	update_tracer_options(&global_trace);

	return 0;
}

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call  = trace_panic_handler,
	.next           = NULL,
	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
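
/*
 * Usage sketch (illustrative, not in the original file): the panic and die
 * notifiers above only dump the ring buffers when ftrace_dump_on_oops is
 * set, either on the kernel command line or via sysctl:
 *
 *	ftrace_dump_on_oops			(boot parameter)
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 *
 * A value of 2 (or "orig_cpu" on the command line) restricts the dump to
 * the CPU that triggered the oops.
 */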
|
|
|
|
|
|
|
|
/*
|
|
|
|
* printk is set to max of 1024, we really don't need it that big.
|
|
|
|
* Nothing should be printing 1000 characters anyway.
|
|
|
|
*/
|
|
|
|
#define TRACE_MAX_PRINT 1000
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Define here KERN_TRACE so that we have one place to modify
|
|
|
|
* it if we decide to change what log level the ftrace dump
|
|
|
|
* should be at.
|
|
|
|
*/
|
2009-01-14 17:24:42 +00:00
|
|
|
#define KERN_TRACE KERN_EMERG
|
2008-07-31 02:36:46 +00:00
|
|
|
|
2010-08-05 14:22:23 +00:00
|
|
|
void
|
2008-07-31 02:36:46 +00:00
|
|
|
trace_printk_seq(struct trace_seq *s)
|
|
|
|
{
|
|
|
|
/* Probably should print a warning here. */
|
2014-06-25 19:54:42 +00:00
|
|
|
if (s->seq.len >= TRACE_MAX_PRINT)
|
|
|
|
s->seq.len = TRACE_MAX_PRINT;
|
2008-07-31 02:36:46 +00:00
|
|
|
|
2014-11-19 15:56:41 +00:00
|
|
|
/*
|
|
|
|
* More paranoid code. Although the buffer size is set to
|
|
|
|
* PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
|
|
|
|
* an extra layer of protection.
|
|
|
|
*/
|
|
|
|
if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
|
|
|
|
s->seq.len = s->seq.size - 1;
|
2008-07-31 02:36:46 +00:00
|
|
|
|
|
|
|
/* should be zero ended, but we are paranoid. */
|
2014-06-25 19:54:42 +00:00
|
|
|
s->buffer[s->seq.len] = 0;
|
2008-07-31 02:36:46 +00:00
|
|
|
|
|
|
|
printk(KERN_TRACE "%s", s->buffer);
|
|
|
|
|
2009-03-02 19:04:40 +00:00
|
|
|
trace_seq_init(s);
|
2008-07-31 02:36:46 +00:00
|
|
|
}
|
|
|
|
|
2010-08-05 14:22:23 +00:00
|
|
|
void trace_init_global_iter(struct trace_iterator *iter)
|
|
|
|
{
|
|
|
|
iter->tr = &global_trace;
|
2012-05-11 17:29:49 +00:00
|
|
|
iter->trace = iter->tr->current_trace;
|
2013-01-23 20:22:59 +00:00
|
|
|
iter->cpu_file = RING_BUFFER_ALL_CPUS;
|
2020-01-09 23:53:48 +00:00
|
|
|
iter->array_buffer = &global_trace.array_buffer;
|
2013-10-23 18:49:57 +00:00
|
|
|
|
|
|
|
if (iter->trace && iter->trace->open)
|
|
|
|
iter->trace->open(iter);
|
|
|
|
|
|
|
|
/* Annotate start of buffers if we had overruns */
|
2020-01-09 23:53:48 +00:00
|
|
|
if (ring_buffer_overruns(iter->array_buffer->buffer))
|
2013-10-23 18:49:57 +00:00
|
|
|
iter->iter_flags |= TRACE_FILE_ANNOTATE;
|
|
|
|
|
|
|
|
/* Output in nanoseconds only if we are using a clock in nanoseconds. */
|
|
|
|
if (trace_clocks[iter->tr->clock_id].in_ns)
|
|
|
|
iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
|
2010-08-05 14:22:23 +00:00
|
|
|
}
|
|
|
|
|
tracing: Fix ftrace_dump()
ftrace_dump() had a lot of issues. What ftrace_dump() does, is when
ftrace_dump_on_oops is set (via a kernel parameter or sysctl), it
will dump out the ftrace buffers to the console when either a oops,
panic, or a sysrq-z occurs.
This was written a long time ago when ftrace was fragile to recursion.
But it wasn't written well even for that.
There's a possible deadlock that can occur if a ftrace_dump() is happening
and an NMI triggers another dump. This is because it grabs a lock
before checking if the dump ran.
It also totally disables ftrace, and tracing for no good reasons.
As the ring_buffer now checks if it is read via a oops or NMI, where
there's a chance that the buffer gets corrupted, it will disable
itself. No need to have ftrace_dump() do the same.
ftrace_dump() is now cleaned up where it uses an atomic counter to
make sure only one dump happens at a time. A simple atomic_inc_return()
is enough that is needed for both other CPUs and NMIs. No need for
a spinlock, as if one CPU is running the dump, no other CPU needs
to do it too.
The tracing_on variable is turned off and not turned on. The original
code did this, but it wasn't pretty. By just disabling this variable
we get the result of not seeing traces that happen between crashes.
For sysrq-z, it doesn't get turned on, but the user can always write
a '1' to the tracing_on file. If they are using sysrq-z, then they should
know about tracing_on.
The new code is much easier to read and less error prone. No more
deadlock possibility when an NMI triggers here.
Reported-by: zhangwei(Jovi) <jovi.zhangwei@huawei.com>
Cc: stable@vger.kernel.org
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2013-03-15 17:10:35 +00:00
|
|
|
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
|
2008-07-31 02:36:46 +00:00
|
|
|
{
|
|
|
|
/* use static because iter can be a bit big for the stack */
|
|
|
|
static struct trace_iterator iter;
|
tracing: Fix ftrace_dump()
ftrace_dump() had a lot of issues. What ftrace_dump() does, is when
ftrace_dump_on_oops is set (via a kernel parameter or sysctl), it
will dump out the ftrace buffers to the console when either a oops,
panic, or a sysrq-z occurs.
This was written a long time ago when ftrace was fragile to recursion.
But it wasn't written well even for that.
There's a possible deadlock that can occur if a ftrace_dump() is happening
and an NMI triggers another dump. This is because it grabs a lock
before checking if the dump ran.
It also totally disables ftrace, and tracing for no good reasons.
As the ring_buffer now checks if it is read via a oops or NMI, where
there's a chance that the buffer gets corrupted, it will disable
itself. No need to have ftrace_dump() do the same.
ftrace_dump() is now cleaned up where it uses an atomic counter to
make sure only one dump happens at a time. A simple atomic_inc_return()
is enough that is needed for both other CPUs and NMIs. No need for
a spinlock, as if one CPU is running the dump, no other CPU needs
to do it too.
The tracing_on variable is turned off and not turned on. The original
code did this, but it wasn't pretty. By just disabling this variable
we get the result of not seeing traces that happen between crashes.
For sysrq-z, it doesn't get turned on, but the user can always write
a '1' to the tracing_on file. If they are using sysrq-z, then they should
know about tracing_on.
The new code is much easier to read and less error prone. No more
deadlock possibility when an NMI triggers here.
Reported-by: zhangwei(Jovi) <jovi.zhangwei@huawei.com>
Cc: stable@vger.kernel.org
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2013-03-15 17:10:35 +00:00
|
|
|
static atomic_t dump_running;
|
2015-09-30 13:42:05 +00:00
|
|
|
struct trace_array *tr = &global_trace;
|
2009-03-22 04:04:35 +00:00
|
|
|
unsigned int old_userobj;
|
2008-10-01 04:29:53 +00:00
|
|
|
unsigned long flags;
|
|
|
|
int cnt = 0, cpu;
|
2008-07-31 02:36:46 +00:00
|
|
|
|
tracing: Fix ftrace_dump()
ftrace_dump() had a lot of issues. What ftrace_dump() does, is when
ftrace_dump_on_oops is set (via a kernel parameter or sysctl), it
will dump out the ftrace buffers to the console when either a oops,
panic, or a sysrq-z occurs.
This was written a long time ago when ftrace was fragile to recursion.
But it wasn't written well even for that.
There's a possible deadlock that can occur if a ftrace_dump() is happening
and an NMI triggers another dump. This is because it grabs a lock
before checking if the dump ran.
It also totally disables ftrace, and tracing for no good reasons.
As the ring_buffer now checks if it is read via a oops or NMI, where
there's a chance that the buffer gets corrupted, it will disable
itself. No need to have ftrace_dump() do the same.
ftrace_dump() is now cleaned up where it uses an atomic counter to
make sure only one dump happens at a time. A simple atomic_inc_return()
is enough that is needed for both other CPUs and NMIs. No need for
a spinlock, as if one CPU is running the dump, no other CPU needs
to do it too.
The tracing_on variable is turned off and not turned on. The original
code did this, but it wasn't pretty. By just disabling this variable
we get the result of not seeing traces that happen between crashes.
For sysrq-z, it doesn't get turned on, but the user can always write
a '1' to the tracing_on file. If they are using sysrq-z, then they should
know about tracing_on.
The new code is much easier to read and less error prone. No more
deadlock possibility when an NMI triggers here.
Reported-by: zhangwei(Jovi) <jovi.zhangwei@huawei.com>
Cc: stable@vger.kernel.org
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2013-03-15 17:10:35 +00:00
|
|
|
/* Only allow one dump user at a time. */
|
|
|
|
if (atomic_inc_return(&dump_running) != 1) {
|
|
|
|
atomic_dec(&dump_running);
|
|
|
|
return;
|
|
|
|
}
|
2008-07-31 02:36:46 +00:00
|
|
|
|
2013-03-15 17:10:35 +00:00
|
|
|
/*
|
|
|
|
* Always turn off tracing when we dump.
|
|
|
|
* We don't need to show trace output of what happens
|
|
|
|
* between multiple crashes.
|
|
|
|
*
|
|
|
|
* If the user does a sysrq-z, then they can re-enable
|
|
|
|
* tracing with echo 1 > tracing_on.
|
|
|
|
*/
|
2009-01-14 19:50:19 +00:00
|
|
|
tracing_off();
|
2009-03-22 04:04:35 +00:00
|
|
|
|
2013-03-15 17:10:35 +00:00
|
|
|
local_irq_save(flags);
|
2018-06-27 14:20:28 +00:00
|
|
|
printk_nmi_direct_enter();
|
2008-07-31 02:36:46 +00:00
|
|
|
|
2013-01-25 10:03:07 +00:00
|
|
|
/* Simulate the iterator */
|
2010-08-05 14:22:23 +00:00
|
|
|
trace_init_global_iter(&iter);
|
tracing: Do not allocate buffer in trace_find_next_entry() in atomic
When dumping out the trace data in latency format, a check is made to peek
at the next event to compare its timestamp to the current one, and if the
delta is large enough, a marker is added to show it. But to do this,
the current event needs to be saved, otherwise peeking at the next event will
remove the current event. To save the event, a temp buffer is used, and if
the event is bigger than the temp buffer, the temp buffer is freed and a
bigger buffer is allocated.
This allocation is a problem when called in atomic context. The only way
this gets called via atomic context is via ftrace_dump(). Thus, use a static
buffer of 128 bytes (which covers most events), and if the event is bigger
than that, simply return NULL. The callers of trace_find_next_entry() need
to handle a NULL case, as that's what would happen if the allocation failed.
Link: https://lore.kernel.org/r/20200326091256.GR11705@shao2-debian
Fixes: ff895103a84ab ("tracing: Save off entry when peeking at next entry")
Reported-by: kernel test robot <rong.a.chen@intel.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
2020-04-02 02:44:46 +00:00
|
|
|
/* Can not use kmalloc for iter.temp */
|
|
|
|
iter.temp = static_temp_buf;
|
|
|
|
iter.temp_size = STATIC_TEMP_BUF_SIZE;
|
2010-08-05 14:22:23 +00:00
|
|
|
|
2008-10-01 04:29:53 +00:00
|
|
|
for_each_tracing_cpu(cpu) {
|
2020-01-09 23:53:48 +00:00
|
|
|
atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
|
2008-10-01 04:29:53 +00:00
|
|
|
}
|
|
|
|
|
2015-09-30 13:42:05 +00:00
|
|
|
old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
|
2009-03-22 04:04:35 +00:00
|
|
|
|
2008-11-22 11:28:48 +00:00
|
|
|
/* don't look at user memory in panic mode */
|
2015-09-30 13:42:05 +00:00
|
|
|
tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
|
2008-11-22 11:28:48 +00:00
|
|
|
|
2010-04-18 17:08:41 +00:00
|
|
|
switch (oops_dump_mode) {
|
|
|
|
case DUMP_ALL:
|
2013-01-23 20:22:59 +00:00
|
|
|
iter.cpu_file = RING_BUFFER_ALL_CPUS;
|
2010-04-18 17:08:41 +00:00
|
|
|
break;
|
|
|
|
case DUMP_ORIG:
|
|
|
|
iter.cpu_file = raw_smp_processor_id();
|
|
|
|
break;
|
|
|
|
case DUMP_NONE:
|
|
|
|
goto out_enable;
|
|
|
|
default:
|
|
|
|
printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
|
2013-01-23 20:22:59 +00:00
|
|
|
iter.cpu_file = RING_BUFFER_ALL_CPUS;
|
2010-04-18 17:08:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
printk(KERN_TRACE "Dumping ftrace buffer:\n");
|
2008-07-31 02:36:46 +00:00
|
|
|
|
2013-03-15 17:10:35 +00:00
|
|
|
/* Did function tracer already get disabled? */
|
|
|
|
if (ftrace_is_dead()) {
|
|
|
|
printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
|
|
|
|
printk("# MAY BE MISSING FUNCTION EVENTS\n");
|
|
|
|
}
|
|
|
|
|
2008-07-31 02:36:46 +00:00
|
|
|
/*
|
|
|
|
* We need to stop all tracing on all CPUs to read
|
|
|
|
* the next buffer. This is a bit expensive, but is
|
|
|
|
* not done often. We fill all that we can read,
|
|
|
|
* and then release the locks again.
|
|
|
|
*/
|
|
|
|
|
|
|
|
while (!trace_empty(&iter)) {
|
|
|
|
|
|
|
|
if (!cnt)
|
|
|
|
printk(KERN_TRACE "---------------------------------\n");
|
|
|
|
|
|
|
|
cnt++;
|
|
|
|
|
2019-05-23 12:45:35 +00:00
|
|
|
trace_iterator_reset(&iter);
|
2008-07-31 02:36:46 +00:00
|
|
|
iter.iter_flags |= TRACE_FILE_LAT_FMT;
|
|
|
|
|
2010-08-05 14:22:23 +00:00
|
|
|
if (trace_find_next_entry_inc(&iter) != NULL) {
|
2009-07-28 12:17:22 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = print_trace_line(&iter);
|
|
|
|
if (ret != TRACE_TYPE_NO_CONSUME)
|
|
|
|
trace_consume(&iter);
|
2008-07-31 02:36:46 +00:00
|
|
|
}
|
2012-03-02 03:06:48 +00:00
|
|
|
touch_nmi_watchdog();
|
2008-07-31 02:36:46 +00:00
|
|
|
|
|
|
|
trace_printk_seq(&iter.seq);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!cnt)
|
|
|
|
printk(KERN_TRACE " (ftrace buffer empty)\n");
|
|
|
|
else
|
|
|
|
printk(KERN_TRACE "---------------------------------\n");
|
|
|
|
|
2010-04-18 17:08:41 +00:00
|
|
|
out_enable:
|
2015-09-30 13:42:05 +00:00
|
|
|
tr->trace_flags |= old_userobj;
|
2009-03-22 04:04:35 +00:00
|
|
|
|
2013-03-15 17:10:35 +00:00
|
|
|
for_each_tracing_cpu(cpu) {
|
2020-01-09 23:53:48 +00:00
|
|
|
atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
|
2009-03-22 04:04:35 +00:00
|
|
|
}
|
2018-06-27 14:20:28 +00:00
|
|
|
atomic_dec(&dump_running);
|
|
|
|
printk_nmi_direct_exit();
|
2009-04-28 15:39:34 +00:00
|
|
|
local_irq_restore(flags);
|
2008-07-31 02:36:46 +00:00
|
|
|
}
|
2011-10-02 18:01:15 +00:00
|
|
|
EXPORT_SYMBOL_GPL(ftrace_dump);
|
2009-03-22 04:04:35 +00:00
|
|
|
|
2017-09-22 19:58:20 +00:00
|
|
|
int trace_run_command(const char *buf, int (*createfn)(int, char **))
|
|
|
|
{
|
|
|
|
char **argv;
|
|
|
|
int argc, ret;
|
|
|
|
|
|
|
|
argc = 0;
|
|
|
|
ret = 0;
|
|
|
|
argv = argv_split(GFP_KERNEL, buf, &argc);
|
|
|
|
if (!argv)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
if (argc)
|
|
|
|
ret = createfn(argc, argv);
|
|
|
|
|
|
|
|
argv_free(argv);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
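A hedged sketch of how a caller might plug a parser into trace_run_command();
create_example_cmd() is a hypothetical callback, not part of this file. The
callback receives one command line already split into whitespace-separated
words, argv-style.

static int create_example_cmd(int argc, char **argv)
{
	if (argc < 1)
		return -EINVAL;

	/* argv[0] is the command name, the rest are its arguments. */
	pr_info("example command '%s' with %d argument(s)\n",
		argv[0], argc - 1);
	return 0;
}

/* e.g.: ret = trace_run_command("myevent arg1 arg2", create_example_cmd); */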
|
|
|
|
|
|
|
|
#define WRITE_BUFSIZE 4096
|
|
|
|
|
|
|
|
ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
|
|
|
|
size_t count, loff_t *ppos,
|
|
|
|
int (*createfn)(int, char **))
|
|
|
|
{
|
|
|
|
char *kbuf, *buf, *tmp;
|
|
|
|
int ret = 0;
|
|
|
|
size_t done = 0;
|
|
|
|
size_t size;
|
|
|
|
|
|
|
|
kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
|
|
|
|
if (!kbuf)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
while (done < count) {
|
|
|
|
size = count - done;
|
|
|
|
|
|
|
|
if (size >= WRITE_BUFSIZE)
|
|
|
|
size = WRITE_BUFSIZE - 1;
|
|
|
|
|
|
|
|
if (copy_from_user(kbuf, buffer + done, size)) {
|
|
|
|
ret = -EFAULT;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
kbuf[size] = '\0';
|
|
|
|
buf = kbuf;
|
|
|
|
do {
|
|
|
|
tmp = strchr(buf, '\n');
|
|
|
|
if (tmp) {
|
|
|
|
*tmp = '\0';
|
|
|
|
size = tmp - buf + 1;
|
|
|
|
} else {
|
|
|
|
size = strlen(buf);
|
|
|
|
if (done + size < count) {
|
|
|
|
if (buf != kbuf)
|
|
|
|
break;
|
|
|
|
/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
|
|
|
|
pr_warn("Line length is too long: Should be less than %d\n",
|
|
|
|
WRITE_BUFSIZE - 2);
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
done += size;
|
|
|
|
|
|
|
|
/* Remove comments */
|
|
|
|
tmp = strchr(buf, '#');
|
|
|
|
|
|
|
|
if (tmp)
|
|
|
|
*tmp = '\0';
|
|
|
|
|
|
|
|
ret = trace_run_command(buf, createfn);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
buf += size;
|
|
|
|
|
|
|
|
} while (done < count);
|
|
|
|
}
|
|
|
|
ret = done;
|
|
|
|
|
|
|
|
out:
|
|
|
|
kfree(kbuf);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
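A hedged sketch of the intended use of trace_parse_run_command(): backing a
tracefs file's ->write handler so that each newline-terminated line written
by the user is handed to the callback. example_events_write() and
example_events_fops are hypothetical; create_example_cmd() is the callback
sketched after trace_run_command() above.

static ssize_t example_events_write(struct file *file,
				    const char __user *buffer,
				    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_example_cmd);
}

static const struct file_operations example_events_fops = {
	.open	= tracing_open_generic,
	.write	= example_events_write,
};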
|
|
|
|
|
2008-09-30 03:02:41 +00:00
|
|
|
__init static int tracer_alloc_buffers(void)
|
2008-05-12 19:20:42 +00:00
|
|
|
{
|
2009-03-11 17:42:01 +00:00
|
|
|
int ring_buf_size;
|
2008-12-31 23:42:22 +00:00
|
|
|
int ret = -ENOMEM;
|
2008-05-12 19:20:43 +00:00
|
|
|
|
2019-12-02 21:25:27 +00:00
|
|
|
|
|
|
|
if (security_locked_down(LOCKDOWN_TRACEFS)) {
|
2019-12-05 22:25:03 +00:00
|
|
|
pr_warn("Tracing disabled due to lockdown\n");
|
2019-12-02 21:25:27 +00:00
|
|
|
return -EPERM;
|
|
|
|
}
|
|
|
|
|
2015-09-29 22:13:33 +00:00
|
|
|
/*
|
|
|
|
* Make sure we don't accidentally add more trace options
|
|
|
|
* than we have bits for.
|
|
|
|
*/
|
2015-09-30 15:11:15 +00:00
|
|
|
BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
|
2015-09-29 22:13:33 +00:00
|
|
|
|
2008-12-31 23:42:22 +00:00
|
|
|
if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
|
|
|
|
goto out;
|
|
|
|
|
2013-08-08 16:47:45 +00:00
|
|
|
if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
|
2008-12-31 23:42:22 +00:00
|
|
|
goto out_free_buffer_mask;
|
2008-05-12 19:20:43 +00:00
|
|
|
|
tracing: Add percpu buffers for trace_printk()
Currently, trace_printk() uses a single buffer to write into
to calculate the size and format needed to save the trace. To
do this safely in an SMP environment, a spin_lock() is taken
to only allow one writer at a time to the buffer. But this could
also affect what is being traced, and add synchronization that
would not be there otherwise.
Per-cpu buffers would be ideal, but since trace_printk()
is only used in development, having per-cpu buffers for something
that may never be used is a waste of space. Thus, the trace_bprintk()
format section is changed to be used for static fmts as well as dynamic ones.
Then at boot up, we can check if the section that holds the trace_printk
formats is non-empty, and if it does contain something, then we
know a trace_printk() has been added to the kernel. At this time
the trace_printk per cpu buffers are allocated. A check is also
done at module load time in case a module is added that contains a
trace_printk().
Once the buffers are allocated, they are never freed. If you use
a trace_printk() then you should know what you are doing.
A buffer is made for each type of context:
normal
softirq
irq
nmi
The context is checked and the appropriate buffer is used.
This allows for totally lockless usage of trace_printk(),
and they no longer even disable interrupts.
Requested-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2011-09-22 18:01:55 +00:00
|
|
|
/* Only allocate trace_printk buffers if a trace_printk exists */
|
2020-02-20 05:10:12 +00:00
|
|
|
if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
|
2012-10-11 14:15:05 +00:00
|
|
|
/* Must be called before global_trace.buffer is allocated */
|
2011-09-22 18:01:55 +00:00
|
|
|
trace_printk_init_buffers();
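A hedged, heavily simplified sketch of the per-context trace_printk()
buffers described in the commit message above; all names are hypothetical.
The real buffers are also per-CPU, which together with the per-context split
is what makes trace_printk() lockless.

enum example_ctx { EX_NORMAL, EX_SOFTIRQ, EX_IRQ, EX_NMI, EX_CTX_MAX };

static char example_printk_buf[EX_CTX_MAX][128];

static char *example_get_printk_buf(void)
{
	/* Pick the buffer that matches the current execution context. */
	if (in_nmi())
		return example_printk_buf[EX_NMI];
	if (in_irq())
		return example_printk_buf[EX_IRQ];
	if (in_softirq())
		return example_printk_buf[EX_SOFTIRQ];
	return example_printk_buf[EX_NORMAL];
}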
|
|
|
|
|
2009-03-11 17:42:01 +00:00
|
|
|
/* To save memory, keep the ring buffer size to its minimum */
|
|
|
|
if (ring_buffer_expanded)
|
|
|
|
ring_buf_size = trace_buf_size;
|
|
|
|
else
|
|
|
|
ring_buf_size = 1;
|
|
|
|
|
2008-12-31 23:42:22 +00:00
|
|
|
cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
|
2013-08-08 16:47:45 +00:00
|
|
|
cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
|
2008-12-31 23:42:22 +00:00
|
|
|
|
2012-05-11 17:29:49 +00:00
|
|
|
raw_spin_lock_init(&global_trace.start_lock);
|
|
|
|
|
2016-11-26 23:13:34 +00:00
|
|
|
/*
|
|
|
|
* The prepare callback allocates some memory for the ring buffer. We
|
|
|
|
* don't free the buffer if the CPU goes down. If we were to free
|
|
|
|
* the buffer, then the user would lose any trace that was in the
|
|
|
|
* buffer. The memory will be removed once the "instance" is removed.
|
|
|
|
*/
|
|
|
|
ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
|
|
|
|
"trace/RB:preapre", trace_rb_cpu_prepare,
|
|
|
|
NULL);
|
|
|
|
if (ret < 0)
|
|
|
|
goto out_free_cpumask;
|
2014-03-26 03:39:41 +00:00
|
|
|
/* Used for event triggers */
|
2017-08-01 11:02:01 +00:00
|
|
|
ret = -ENOMEM;
|
2014-03-26 03:39:41 +00:00
|
|
|
temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
|
|
|
|
if (!temp_buffer)
|
2016-11-26 23:13:34 +00:00
|
|
|
goto out_rm_hp_state;
|
2014-03-26 03:39:41 +00:00
|
|
|
|
2014-06-05 01:24:27 +00:00
|
|
|
if (trace_create_savedcmd() < 0)
|
|
|
|
goto out_free_temp_buffer;
|
|
|
|
|
2008-12-31 23:42:22 +00:00
|
|
|
/* TODO: make the number of buffers hot pluggable with CPUs */
|
2013-03-06 02:13:47 +00:00
|
|
|
if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
|
2020-01-25 15:52:30 +00:00
|
|
|
MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
|
2014-06-05 01:24:27 +00:00
|
|
|
goto out_free_savedcmd;
|
2008-05-12 19:20:43 +00:00
|
|
|
}
|
2012-08-06 20:24:11 +00:00
|
|
|
|
2012-02-22 20:50:28 +00:00
|
|
|
if (global_trace.buffer_disabled)
|
|
|
|
tracing_off();
|
2008-05-12 19:20:43 +00:00
|
|
|
|
2014-02-11 04:38:46 +00:00
|
|
|
if (trace_boot_clock) {
|
|
|
|
ret = tracing_set_clock(&global_trace, trace_boot_clock);
|
|
|
|
if (ret < 0)
|
2016-03-22 21:28:09 +00:00
|
|
|
pr_warn("Trace clock %s not defined, going back to default\n",
|
|
|
|
trace_boot_clock);
|
2014-02-11 04:38:46 +00:00
|
|
|
}
|
|
|
|
|
2013-05-23 15:51:10 +00:00
|
|
|
/*
|
|
|
|
* register_tracer() might reference current_trace, so it
|
|
|
|
* needs to be set before we register anything. This is
|
|
|
|
* just a bootstrap of current_trace anyway.
|
|
|
|
*/
|
2012-05-11 17:29:49 +00:00
|
|
|
global_trace.current_trace = &nop_trace;
|
|
|
|
|
2014-01-14 15:04:59 +00:00
|
|
|
global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
|
|
|
|
|
2014-01-10 22:01:58 +00:00
|
|
|
ftrace_init_global_array_ops(&global_trace);
|
|
|
|
|
2015-09-30 15:11:15 +00:00
|
|
|
init_trace_flags_index(&global_trace);
|
|
|
|
|
2013-05-23 15:51:10 +00:00
|
|
|
register_tracer(&nop_trace);
|
|
|
|
|
2017-03-03 18:48:42 +00:00
|
|
|
/* Function tracing may start here (via kernel command line) */
|
|
|
|
init_function_trace();
|
|
|
|
|
2008-05-12 19:20:44 +00:00
|
|
|
/* All seems OK, enable tracing */
|
|
|
|
tracing_disabled = 0;
|
2008-09-30 03:02:41 +00:00
|
|
|
|
2008-07-31 02:36:46 +00:00
|
|
|
atomic_notifier_chain_register(&panic_notifier_list,
|
|
|
|
&trace_panic_notifier);
|
|
|
|
|
|
|
|
register_die_notifier(&trace_die_notifier);
|
2009-03-16 00:45:03 +00:00
|
|
|
|
2012-05-04 03:09:03 +00:00
|
|
|
global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
|
|
|
|
|
|
|
|
INIT_LIST_HEAD(&global_trace.systems);
|
|
|
|
INIT_LIST_HEAD(&global_trace.events);
|
2018-01-16 02:51:56 +00:00
|
|
|
INIT_LIST_HEAD(&global_trace.hist_vars);
|
2019-04-02 02:52:21 +00:00
|
|
|
INIT_LIST_HEAD(&global_trace.err_log);
|
2012-05-04 03:09:03 +00:00
|
|
|
list_add(&global_trace.list, &ftrace_trace_arrays);
|
|
|
|
|
2015-11-04 01:14:29 +00:00
|
|
|
apply_trace_boot_options();
|
2012-11-02 02:56:07 +00:00
|
|
|
|
2013-03-12 15:49:18 +00:00
|
|
|
register_snapshot_cmd();
|
|
|
|
|
2009-03-16 00:45:03 +00:00
|
|
|
return 0;
|
2008-07-31 02:36:46 +00:00
|
|
|
|
2014-06-05 01:24:27 +00:00
|
|
|
out_free_savedcmd:
|
|
|
|
free_saved_cmdlines_buffer(savedcmd);
|
2014-03-26 03:39:41 +00:00
|
|
|
out_free_temp_buffer:
|
|
|
|
ring_buffer_free(temp_buffer);
|
2016-11-26 23:13:34 +00:00
|
|
|
out_rm_hp_state:
|
|
|
|
cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
|
2008-12-31 23:42:22 +00:00
|
|
|
out_free_cpumask:
|
2013-08-08 16:47:45 +00:00
|
|
|
free_cpumask_var(global_trace.tracing_cpumask);
|
2008-12-31 23:42:22 +00:00
|
|
|
out_free_buffer_mask:
|
|
|
|
free_cpumask_var(tracing_buffer_mask);
|
|
|
|
out:
|
|
|
|
return ret;
|
2008-05-12 19:20:42 +00:00
|
|
|
}
|
2009-02-03 02:38:32 +00:00
|
|
|
|
2017-03-03 18:37:33 +00:00
|
|
|
void __init early_trace_init(void)
|
2014-12-13 01:05:10 +00:00
|
|
|
{
|
2014-12-13 03:27:10 +00:00
|
|
|
if (tracepoint_printk) {
|
|
|
|
tracepoint_print_iter =
|
|
|
|
kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
|
2020-01-25 15:52:30 +00:00
|
|
|
if (MEM_FAIL(!tracepoint_print_iter,
|
|
|
|
"Failed to allocate trace iterator\n"))
|
2014-12-13 03:27:10 +00:00
|
|
|
tracepoint_printk = 0;
|
2016-11-23 20:52:45 +00:00
|
|
|
else
|
|
|
|
static_key_enable(&tracepoint_printk_key.key);
|
2014-12-13 03:27:10 +00:00
|
|
|
}
|
2014-12-13 01:05:10 +00:00
|
|
|
tracer_alloc_buffers();
|
2017-03-03 18:37:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void __init trace_init(void)
|
|
|
|
{
|
tracing: Add TRACE_DEFINE_ENUM() macro to map enums to their values
Several tracepoints use the helper functions __print_symbolic() or
__print_flags() and pass in enums that do the mapping between the
binary data stored and the value to print. This works well for reading
the ASCII trace files, but when the data is read via userspace tools
such as perf and trace-cmd, the conversion of the binary value to a
human string format is lost if an enum is used, as userspace does not
have access to what the ENUM is.
For example, the tracepoint trace_tlb_flush() has:
__print_symbolic(REC->reason,
{ TLB_FLUSH_ON_TASK_SWITCH, "flush on task switch" },
{ TLB_REMOTE_SHOOTDOWN, "remote shootdown" },
{ TLB_LOCAL_SHOOTDOWN, "local shootdown" },
{ TLB_LOCAL_MM_SHOOTDOWN, "local mm shootdown" })
Which maps the enum values to the strings they represent. But perf and
trace-cmd do not know what value TLB_LOCAL_MM_SHOOTDOWN is, and would
not be able to map it.
With TRACE_DEFINE_ENUM(), developers can place these in the event header
files and ftrace will convert the enums to their values:
By adding:
TRACE_DEFINE_ENUM(TLB_FLUSH_ON_TASK_SWITCH);
TRACE_DEFINE_ENUM(TLB_REMOTE_SHOOTDOWN);
TRACE_DEFINE_ENUM(TLB_LOCAL_SHOOTDOWN);
TRACE_DEFINE_ENUM(TLB_LOCAL_MM_SHOOTDOWN);
$ cat /sys/kernel/debug/tracing/events/tlb/tlb_flush/format
[...]
__print_symbolic(REC->reason,
{ 0, "flush on task switch" },
{ 1, "remote shootdown" },
{ 2, "local shootdown" },
{ 3, "local mm shootdown" })
The above is what userspace expects to see, and tools do not need to
be modified to parse them.
Link: http://lkml.kernel.org/r/20150403013802.220157513@goodmis.org
Cc: Guilherme Cox <cox@computer.org>
Cc: Tony Luck <tony.luck@gmail.com>
Cc: Xie XiuQi <xiexiuqi@huawei.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Reviewed-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Tested-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2015-03-24 21:58:09 +00:00
|
|
|
trace_event_init();
|
2014-12-13 01:05:10 +00:00
|
|
|
}
|
|
|
|
|
2009-02-03 02:38:32 +00:00
|
|
|
__init static int clear_boot_tracer(void)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* The default bootup tracer buffer lives in an init section.
|
|
|
|
* This function is called as a late initcall. If we did not
|
|
|
|
* find the boot tracer, then clear it out, to prevent
|
|
|
|
* later registration from accessing the buffer that is
|
|
|
|
* about to be freed.
|
|
|
|
*/
|
|
|
|
if (!default_bootup_tracer)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
|
|
|
|
default_bootup_tracer);
|
|
|
|
default_bootup_tracer = NULL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-01-20 17:13:40 +00:00
|
|
|
fs_initcall(tracer_init_tracefs);
|
2017-08-01 16:01:52 +00:00
|
|
|
late_initcall_sync(clear_boot_tracer);
|
2018-03-30 15:01:31 +00:00
|
|
|
|
|
|
|
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
|
|
|
|
__init static int tracing_set_default_clock(void)
|
|
|
|
{
|
|
|
|
/* sched_clock_stable() is determined in late_initcall */
|
2018-04-04 21:24:50 +00:00
|
|
|
if (!trace_boot_clock && !sched_clock_stable()) {
|
2020-01-16 13:12:36 +00:00
|
|
|
if (security_locked_down(LOCKDOWN_TRACEFS)) {
|
|
|
|
pr_warn("Can not set tracing clock due to lockdown\n");
|
|
|
|
return -EPERM;
|
|
|
|
}
|
|
|
|
|
2018-03-30 15:01:31 +00:00
|
|
|
printk(KERN_WARNING
|
|
|
|
"Unstable clock detected, switching default tracing clock to \"global\"\n"
|
|
|
|
"If you want to keep using the local clock, then add:\n"
|
|
|
|
" \"trace_clock=local\"\n"
|
|
|
|
"on the kernel command line\n");
|
|
|
|
tracing_set_clock(&global_trace, "global");
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
late_initcall_sync(tracing_set_default_clock);
|
|
|
|
#endif
|