13a1e4aef5

The event trigger code that checks for callback triggers before and
after recording of an event has lots of flags checks. This code is
duplicated throughout the ftrace events, kprobes and system calls.
They all do the exact same checks against the event flags.

Added helper functions ftrace_trigger_soft_disabled(),
event_trigger_unlock_commit() and event_trigger_unlock_commit_regs()
that consolidate the code; these are used instead.

Link: http://lkml.kernel.org/r/20140106222703.5e7dbba2@gandalf.local.home

Acked-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Tested-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry	ent;
 *	<type>			<item>;
 *	<type2>			<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/ftrace_event.h>

/*
 * DECLARE_EVENT_CLASS can be used to add generic function handlers
 * for events. That is, if all events have the same parameters and just
 * have distinct trace points.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
	DECLARE_EVENT_CLASS(name,			       \
			    PARAMS(proto),		       \
			    PARAMS(args),		       \
			    PARAMS(tstruct),		       \
			    PARAMS(assign),		       \
			    PARAMS(print));		       \
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));


#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {					\
		struct trace_entry	ent;				\
		tstruct							\
		char			__data[0];			\
	};								\
									\
	static struct ftrace_event_class event_class_##name;
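
/*
 * For illustration only: a hypothetical event definition and the
 * structure that stage 1 would generate for it. The event "foo_bar"
 * and its fields are made-up names, not a real tracepoint.
 *
 * TRACE_EVENT(foo_bar,
 *	TP_PROTO(const char *name, int value),
 *	TP_ARGS(name, value),
 *	TP_STRUCT__entry(
 *		__array(char, name, 16)
 *		__field(int, value)
 *		__string(msg, name)
 *	),
 *	TP_fast_assign(
 *		memcpy(__entry->name, name, 16);
 *		__entry->value = value;
 *		__assign_str(msg, name);
 *	),
 *	TP_printk("name=%s value=%d msg=%s",
 *		  __entry->name, __entry->value, __get_str(msg))
 * );
 *
 * After stage 1, the TP_STRUCT__entry() above expands to:
 *
 * struct ftrace_raw_foo_bar {
 *	struct trace_entry	ent;
 *	char			name[16];
 *	int			value;
 *	u32			__data_loc_msg;
 *	char			__data[0];
 * };
 */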

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)	\
	static struct ftrace_event_call	__used		\
	__attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_FN
#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),		\
		PARAMS(tstruct), PARAMS(assign), PARAMS(print))	\

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(name, value)				\
	__TRACE_EVENT_FLAGS(name, value)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(name, expr...)			\
	__TRACE_EVENT_PERF_PERM(name, expr)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
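
/*
 * For illustration only: TRACE_INCLUDE() re-includes the trace header
 * that defined the events, and each stage redefines the helper macros
 * before doing so. That header must therefore allow repeated
 * inclusion. A hypothetical trace/events/foo.h would be laid out
 * roughly as:
 *
 * #undef TRACE_SYSTEM
 * #define TRACE_SYSTEM foo
 *
 * #if !defined(_TRACE_FOO_H) || defined(TRACE_HEADER_MULTI_READ)
 * #define _TRACE_FOO_H
 *
 * #include <linux/tracepoint.h>
 *
 * ... TRACE_EVENT() definitions ...
 *
 * #endif
 *
 * #include <trace/define_trace.h>
 */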

/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32				<item1>;
 *	u32				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>, which is
 * used to keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the higher 16 bits of <item>.
 */

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {				\
		tstruct;						\
	};
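
/*
 * For illustration only: for the hypothetical foo_bar event above,
 * which has a single __string() field, stage 2 would produce:
 *
 * struct ftrace_data_offsets_foo_bar {
 *	u32	msg;
 * };
 */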

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(event, flag)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(event, expr...)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p = &iter->tmp_seq;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>->event.type) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, "%s: ", <call>);
 *	if (ret)
 *		ret = trace_seq_printf(s, <TP_printk> "\n");
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})

#undef __print_symbolic_u64
#if BITS_PER_LONG == 32
#define __print_symbolic_u64(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags_u64 symbols[] =	\
			{ symbol_array, { -1, NULL } };			\
		ftrace_print_symbols_seq_u64(p, value, symbols);	\
	})
#else
#define __print_symbolic_u64(value, symbol_array...)			\
			__print_symbolic(value, symbol_array)
#endif

#undef __print_hex
#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
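
/*
 * For illustration only: __print_flags() and __print_symbolic() are
 * used inside TP_printk(). With hypothetical flag and state values:
 *
 *	TP_printk("flags=%s state=%s",
 *		  __print_flags(__entry->flags, "|",
 *			{ 0x1, "READ" }, { 0x2, "WRITE" }),
 *		  __print_symbolic(__entry->state,
 *			{ 0, "idle" }, { 1, "busy" }))
 *
 * would render, e.g., flags value 0x3 as "READ|WRITE" and state 1 as
 * "busy".
 */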

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *trace_event)		\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct trace_seq __maybe_unused *p = &iter->tmp_seq;		\
	struct ftrace_raw_##call *field;				\
	int ret;							\
									\
	field = (typeof(field))iter->ent;				\
									\
	ret = ftrace_raw_output_prep(iter, trace_event);		\
	if (ret)							\
		return ret;						\
									\
	ret = trace_seq_printf(s, print);				\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *event)			\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##template *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p = &iter->tmp_seq;				\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.event.type) {			\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", #call);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	do {								\
		mutex_lock(&event_storage_mutex);			\
		BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);			\
		snprintf(event_storage, sizeof(event_storage),		\
			 "%s[%d]", #type, len);				\
		ret = trace_define_field(event_call, event_storage, #item, \
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
		mutex_unlock(&event_storage_mutex);			\
		if (ret)						\
			return ret;					\
	} while (0);

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item),	       \
				 is_signed_type(type), FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)
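
/*
 * For illustration only: the trace_define_field() calls above are what
 * populate an event's "format" file. For the hypothetical foo_bar
 * event, the fields would be described roughly as:
 *
 *	field:char name[16];		offset:8;	size:16;	signed:0;
 *	field:int value;		offset:24;	size:4;		signed:1;
 *	field:__data_loc char[] msg;	offset:28;	size:4;		signed:0;
 *
 * (the offsets here assume an 8-byte struct trace_entry and are made
 * up for illustration).
 */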

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static int notrace __init						\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	tstruct;							\
									\
	return ret;							\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item,		\
		    strlen((src) ? (const char *)(src) : "(null)") + 1)
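
/*
 * For illustration only: a worked example of the __data_loc encoding
 * built above. If the dynamic data for "msg" starts 32 bytes into the
 * event record and the string (including its NUL) is 4 bytes long:
 *
 *	__data_offsets->msg = 32 | (4 << 16) = 0x00040020
 *
 * The low 16 bits locate the data from the start of the entry and the
 * high 16 bits hold its length, matching the "& 0xffff" extraction
 * done in __get_dynamic_array().
 */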

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static inline notrace int ftrace_get_offsets_##call(			\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(void *__data, proto)
 * {
 *	struct ftrace_event_file *ftrace_file = __data;
 *	struct ftrace_event_call *event_call = ftrace_file->event_call;
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	unsigned long eflags = ftrace_file->flags;
 *	enum event_trigger_type __tt = ETT_NONE;
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int __data_size;
 *	int pc;
 *
 *	if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
 *		if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
 *			event_triggers_call(ftrace_file, NULL);
 *		if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
 *			return;
 *	}
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
 *				  event_<call>->event.type,
 *				  sizeof(*entry) + __data_size,
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	{ <assign>; }  <-- Here we assign the entries by the __field and
 *			   __array macros.
 *
 *	if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
 *		__tt = event_triggers_call(ftrace_file, entry);
 *
 *	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
 *		     &ftrace_file->flags))
 *		ring_buffer_discard_commit(buffer, event);
 *	else if (!filter_check_discard(ftrace_file, entry, buffer, event))
 *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 *
 *	if (__tt)
 *		event_triggers_post_call(ftrace_file, __tt);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static const char print_fmt_<call>[] = <TP_printk>;
 *
 * static struct ftrace_event_class __used event_class_<template> = {
 *	.system			= "<system>",
 *	.define_fields		= ftrace_define_fields_<call>,
 *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
 *	.raw_init		= trace_event_raw_init,
 *	.probe			= ftrace_raw_event_##call,
 *	.reg			= ftrace_event_reg,
 * };
 *
 * static struct ftrace_event_call event_<call> = {
 *	.name			= "<call>",
 *	.class			= event_class_<template>,
 *	.event			= &ftrace_event_type_<call>,
 *	.print_fmt		= print_fmt_<call>,
 * };
 * // it's only safe to use pointers when doing linker tricks to
 * // create an array.
 * static struct ftrace_event_call __used
 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
 *
 */

#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto)					\
	static notrace void						\
	perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call)						\
	.perf_probe		= perf_trace_##call,

#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef __perf_addr
#define __perf_addr(a)	(a)

#undef __perf_count
#define __perf_count(c)	(c)

#undef __perf_task
#define __perf_task(t)	(t)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
									\
static notrace void							\
ftrace_raw_event_##call(void *__data, proto)				\
{									\
	struct ftrace_event_file *ftrace_file = __data;			\
	struct ftrace_event_call *event_call = ftrace_file->event_call; \
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	if (ftrace_trigger_soft_disabled(ftrace_file))			\
		return;							\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,	\
				 event_call->event.type,		\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry = ring_buffer_event_data(event);				\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	event_trigger_unlock_commit(ftrace_file, buffer, event, entry,	\
				    irq_flags, pc);			\
}
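
/*
 * Note: ftrace_trigger_soft_disabled() and event_trigger_unlock_commit()
 * above consolidate the open-coded flag checks shown in the stage 4
 * comment. Roughly (a sketch based on that comment, not the helpers'
 * exact bodies):
 *
 *	if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
 *		if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
 *			event_triggers_call(ftrace_file, NULL);
 *		if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
 *			return;
 *	}
 *
 * is what ftrace_trigger_soft_disabled() performs before the record is
 * reserved, and the conditional discard-or-commit plus the post call of
 * the triggers is what event_trigger_unlock_commit() performs after the
 * assignment. The same helpers replace the identical checks previously
 * duplicated in the kprobe and syscall event code.
 */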

/*
 * The ftrace_test_probe is compiled out; it is only here as a build time
 * check to make sure that if the tracepoint handling changes, the
 * ftrace probe will fail to compile unless it too is updated.
 */

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void ftrace_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(ftrace_raw_event_##template);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __print_hex
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)
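
/*
 * For illustration only: after the TP_printk() redefinition above, the
 * print_fmt_<call> string for the hypothetical foo_bar event would be
 * built as:
 *
 *	"\"name=%s value=%d msg=%s\", REC->name, REC->value, __get_str(msg)"
 *
 * which is what shows up on the "print fmt:" line of the event's
 * format file.
 */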

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
_TRACE_PERF_PROTO(call, PARAMS(proto));					\
static const char print_fmt_##call[] = print;				\
static struct ftrace_event_class __used __refdata event_class_##call = { \
	.system			= __stringify(TRACE_SYSTEM),		\
	.define_fields		= ftrace_define_fields_##call,		\
	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
	.raw_init		= trace_event_raw_init,			\
	.probe			= ftrace_raw_event_##call,		\
	.reg			= ftrace_event_reg,			\
	_TRACE_PERF_INIT(call)						\
};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static struct ftrace_event_call __used event_##call = {		\
	.name			= #call,				\
	.class			= &event_class_##template,		\
	.event.funcs		= &ftrace_event_type_funcs_##template,	\
	.print_fmt		= print_fmt_##template,			\
};									\
static struct ftrace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
									\
static const char print_fmt_##call[] = print;				\
									\
static struct ftrace_event_call __used event_##call = {		\
	.name			= #call,				\
	.class			= &event_class_##template,		\
	.event.funcs		= &ftrace_event_type_funcs_##call,	\
	.print_fmt		= print_fmt_##call,			\
};									\
static struct ftrace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)


#ifdef CONFIG_PERF_EVENTS

#undef __entry
#define __entry entry

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __perf_addr
#define __perf_addr(a)	(__addr = (a))

#undef __perf_count
#define __perf_count(c)	(__count = (c))

#undef __perf_task
#define __perf_task(t)	(__task = (t))

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
{									\
	struct ftrace_event_call *event_call = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_raw_##call *entry;				\
	struct pt_regs __regs;						\
	u64 __addr = 0, __count = 1;					\
	struct task_struct *__task = NULL;				\
	struct hlist_head *head;					\
	int __entry_size;						\
	int __data_size;						\
	int rctx;							\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	head = this_cpu_ptr(event_call->perf_events);			\
	if (__builtin_constant_p(!__task) && !__task &&			\
				hlist_empty(head))			\
		return;							\
									\
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	perf_fetch_caller_regs(&__regs);				\
	entry = perf_trace_buf_prepare(__entry_size,			\
			event_call->event.type, &__regs, &rctx);	\
	if (!entry)							\
		return;							\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
		__count, &__regs, head, __task);			\
}
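
/*
 * For illustration only: a worked example of the __entry_size
 * computation above. With a hypothetical sizeof(*entry) == 32 and
 * __data_size == 5:
 *
 *	ALIGN(5 + 32 + sizeof(u32), sizeof(u64)) = ALIGN(41, 8) = 48
 *	__entry_size = 48 - sizeof(u32) = 44
 *
 * i.e. the record is padded so that, together with the u32 size field
 * that perf prepends to raw samples, the buffer stays u64-aligned.
 */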

/*
 * This part is compiled out; it is only here as a build time check
 * to make sure that if the tracepoint handling changes, the
 * perf probe will fail to compile unless it too is updated.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void perf_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(perf_trace_##template);	\
}


#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */