/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

static DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);

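/*
 * Record the type, name, offset and size of one field of an event
 * record on the call's field list, where it can later be looked up
 * by name (e.g. by the event filter code).
 */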
int trace_define_field(struct ftrace_event_call *call, char *type,
		       char *name, int offset, int size)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	field->offset = offset;
	field->size = size;
	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field) {
		kfree(field->name);
		kfree(field->type);
	}
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	list_for_each_entry(call, &ftrace_events, list) {

		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
	}
}

static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{

	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
		break;
	case 1:
		if (!call->enabled) {
			call->enabled = 1;
			call->regfunc();
		}
		break;
	}
}

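/*
 * Enable (set == 1) or disable (set == 0) every event matching buf.
 * See the format comment below; e.g. "sched:sched_switch" names one
 * event, "sched:" a whole subsystem, and "*:*" everything.
 */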
static int ftrace_set_clr_event(char *buf, int set)
{
	struct ftrace_event_call *call;
	char *event = NULL, *sub = NULL, *match;
	int ret = -EINVAL;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	size_t read = 0;
	int i, set = 1;
	ssize_t ret;
	char *buf;
	char ch;

	/* cnt is a size_t and can never be negative */
	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		read++;
		cnt--;
	}

	/* Only white space found? */
	if (isspace(ch)) {
		file->f_pos += read;
		ret = read;
		return ret;
	}

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	i = 0;
	while (cnt && !isspace(ch)) {
		if (!i && ch == '!')
			set = 0;
		else
			buf[i++] = ch;

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		read++;
		cnt--;
	}
	buf[i] = 0;

	file->f_pos += read;

	ret = ftrace_set_clr_event(buf, set);
	if (ret)
		goto out_free;

	ret = read;

 out_free:
	kfree(buf);

	return ret;
}

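/*
 * Iterator for the "available_events" file: list every event that has
 * a regfunc.  Entries without one (the ftrace internal events) only
 * exist to expose their format and are skipped here.
 */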
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

	for (;;) {
		if (list == &ftrace_events)
			return NULL;

		call = list_entry(list, struct ftrace_event_call, list);

		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			break;

		list = list->next;
	}

	m->private = list->next;

	return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	return t_next(m, NULL, pos);
}

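/* Iterator for the "set_event" file: list only the enabled events. */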
static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

 retry:
	if (list == &ftrace_events)
		return NULL;

	call = list_entry(list, struct ftrace_event_call, list);

	if (!call->enabled) {
		list = list->next;
		goto retry;
	}

	m->private = list->next;

	return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	return s_next(m, NULL, pos);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	int ret;
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	ret = seq_open(file, seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = ftrace_events.next;
	}
	return ret;
}

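/*
 * Per-event "enable" file: reads return "0\n" or "1\n"; writing 0 or 1
 * disables or enables the event under event_mutex.
 */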
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}

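/*
 * __bad_type_size() is never defined anywhere.  If the type given to
 * the FIELD() macro below does not match the size of the struct
 * member, the reference to it survives constant folding and the build
 * breaks at link time instead of emitting a bogus format description.
 */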
extern char *__bad_type_size(void);

#undef FIELD
#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
		sizeof(field.name)

static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\n",
				FIELD(unsigned short, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
				FIELD(int, pid),
				FIELD(int, tgid));
}

static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

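/*
 * Per-event "filter" file: reads pretty-print the current predicates,
 * writes are parsed into a new predicate.  A write that parses as a
 * clear request (pred->clear) drops the existing predicates instead
 * of adding one.
 */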
static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	filter_print_preds(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64], *pbuf = buf;
	struct filter_pred *pred;
	int err;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';

	pred = kzalloc(sizeof(*pred), GFP_KERNEL);
	if (!pred)
		return -ENOMEM;

	err = filter_parse(&pbuf, pred);
	if (err < 0) {
		filter_free_pred(pred);
		return err;
	}

	if (pred->clear) {
		filter_disable_preds(call);
		filter_free_pred(pred);
		return cnt;
	}

	err = filter_add_pred(call, pred);
	if (err < 0) {
		filter_free_pred(pred);
		return err;
	}

	filter_free_pred(pred);

	*ppos += cnt;

	return cnt;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	filter_print_subsystem_preds(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char buf[64], *pbuf = buf;
	struct filter_pred *pred;
	int err;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';

	pred = kzalloc(sizeof(*pred), GFP_KERNEL);
	if (!pred)
		return -ENOMEM;

	err = filter_parse(&pbuf, pred);
	if (err < 0) {
		filter_free_pred(pred);
		return err;
	}

	if (pred->clear) {
		filter_free_subsystem_preds(system);
		filter_free_pred(pred);
		return cnt;
	}

	err = filter_add_subsystem_pred(system, pred);
	if (err < 0) {
		filter_free_pred(pred);
		return err;
	}

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};

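/* Create the debugfs "events" directory on first use and cache it. */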
static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			return system->entry;
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->preds = NULL;
	system->n_preds = 0;

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);

	return system->entry;
}

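/*
 * Create the per-event debugfs directory along with its "enable",
 * "id", "filter" and "format" files.  The file_operations are passed
 * in so that module events can use module-owned copies (see the
 * CONFIG_MODULES code below).
 */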
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct dentry *entry;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	if (call->raw_init) {
		ret = call->raw_init();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		entry = trace_create_file("enable", 0644, call->dir, call,
					  enable);

	if (call->id)
		entry = trace_create_file("id", 0444, call->dir, call,
					  id);

	if (call->define_fields) {
		ret = call->define_fields();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		entry = trace_create_file("filter", 0644, call->dir, call,
					  filter);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	entry = trace_create_file("format", 0444, call->dir, call,
				  format);

	return 0;
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;

		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events,
				 &file_ops->id, &file_ops->enable,
				 &file_ops->filter, &file_ops->format);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;

	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			if (call->enabled) {
				call->enabled = 0;
				call->unregfunc();
			}
			if (call->event)
				unregister_ftrace_event(call->event);
			debugfs_remove_recursive(call->dir);
			list_del(&call->list);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

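/*
 * Register every builtin event placed between the linker-provided
 * __start_ftrace_events/__stop_ftrace_events markers and create its
 * debugfs directory.
 */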
static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events, &ftrace_event_id_fops,
				 &ftrace_enable_fops, &ftrace_event_filter_fops,
				 &ftrace_event_format_fops);
	}

	ret = register_module_notifier(&trace_module_nb);
	/* register_module_notifier() returns 0 on success */
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	char *sysname;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		call->enabled = 1;
		call->regfunc();

		event_test_stuff();

		call->unregfunc();
		call->enabled = 0;

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		/* ftrace_set_clr_event can modify the name passed in. */
		sysname = kstrdup(system->name, GFP_KERNEL);
		if (WARN_ON(!sysname)) {
			pr_warning("Can't allocate memory, giving up!\n");
			return;
		}
		ret = ftrace_set_clr_event(sysname, 1);
		kfree(sysname);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		sysname = kstrdup(system->name, GFP_KERNEL);
		if (WARN_ON(!sysname)) {
			pr_warning("Can't allocate memory, giving up!\n");
			return;
		}
		ret = ftrace_set_clr_event(sysname, 0);
		kfree(sysname);

		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	sysname = kmalloc(4, GFP_KERNEL);
	if (WARN_ON(!sysname)) {
		pr_warning("Can't allocate memory, giving up!\n");
		return;
	}
	memcpy(sysname, "*:*", 4);
	ret = ftrace_set_clr_event(sysname, 1);
	if (WARN_ON_ONCE(ret)) {
		kfree(sysname);
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	memcpy(sysname, "*:*", 4);
	ret = ftrace_set_clr_event(sysname, 0);
	kfree(sysname);

	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

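/*
 * With the function tracer running, every traced function calls the
 * probe below.  The per-cpu test_event_disable counter keeps the
 * probe from recursing into itself: if it is already nonzero on this
 * CPU we are being re-entered and bail out.
 */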
#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(event, flags, pc);

 out:
	atomic_dec(&per_cpu(test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{

	event_trace_self_tests();

	event_trace_self_test_with_function();

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif