Merge tag 'trace-v5.6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:

 - Added new "bootconfig".

   This looks for a file appended to initrd to add boot config options,
   and has been discussed thoroughly at Linux Plumbers.

   Very useful for adding kprobes at bootup (a short usage sketch
   follows this list).

   Only enabled if "bootconfig" is on the real kernel command line.

 - Created a dynamic event creation interface.

   Merges common code between creating synthetic events and kprobe
   events (a condensed sketch of the interface follows this list).

 - Rename perf "ring_buffer" structure to "perf_buffer"

 - Rename ftrace "ring_buffer" structure to "trace_buffer"

   Had to rename existing "trace_buffer" to "array_buffer"

 - Allow trace_printk() to work within (some) tracing code.

 - Sorted the tracing configs to be a little better organized

 - Fixed bug where ftrace_graph hash was not being protected properly

 - Various other small fixes and clean ups
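
   As a rough sketch of the bootconfig usage mentioned above (the key
   names and tool invocation follow the boot-time tracing support added
   in this series, but treat the exact spellings here as illustrative
   assumptions rather than a reference):

     # tracing.bconf - hypothetical bootconfig fragment that defines a
     # kprobe event on vfs_read and enables it at boot
     ftrace.event.kprobes.myprobe.probes = "vfs_read $arg1 $arg2"
     ftrace.event.kprobes.myprobe.enable

     # Append it to the initrd with the new tools/bootconfig utility,
     # then boot with "bootconfig" on the real kernel command line:
     #   bootconfig -a tracing.bconf /boot/initrd.img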
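
   To make the dynamic event interface concrete, here is a sketch of
   the kprobe side, condensed from the kprobe_event_gen_test module
   added by this pull (the wrapper function and its name are an
   abbreviation for illustration; error cleanup, event enabling and
   deletion are shown in the full module further down):

     #include <linux/module.h>
     #include <linux/trace_events.h>

     static int __init create_sample_kprobe_event(void)
     {
             struct dynevent_cmd cmd;
             char *buf;
             int ret;

             /* Buffer that receives the generated probe command string */
             buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
             if (!buf)
                     return -ENOMEM;

             /* Initialize the cmd object before generating the command */
             kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

             /* Define the event on do_sys_open with its first two fields */
             ret = kprobe_event_gen_cmd_start(&cmd, "gen_kprobe_test",
                                              "do_sys_open",
                                              "dfd=%ax", "filename=%dx");
             if (ret)
                     goto free;

             /* Add the remaining fields */
             ret = kprobe_event_add_fields(&cmd, "flags=%cx",
                                           "mode=+4($stack)");
             if (ret)
                     goto free;

             /* This call parses the command and registers the event */
             ret = kprobe_event_gen_cmd_end(&cmd);
     free:
             if (ret)
                     kfree(buf);
             return ret;
     }

   The test module then looks the event up with trace_get_event_file(NULL,
   "kprobes", "gen_kprobe_test"), enables it with
   trace_array_set_clr_event(), and removes it with kprobe_event_delete();
   the kretprobe and synthetic-event variants follow the same
   cmd_init / gen_cmd_start / gen_cmd_end pattern.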

* tag 'trace-v5.6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (88 commits)
  bootconfig: Show the number of nodes on boot message
  tools/bootconfig: Show the number of bootconfig nodes
  bootconfig: Add more parse error messages
  bootconfig: Use bootconfig instead of boot config
  ftrace: Protect ftrace_graph_hash with ftrace_sync
  ftrace: Add comment to why rcu_dereference_sched() is open coded
  tracing: Annotate ftrace_graph_notrace_hash pointer with __rcu
  tracing: Annotate ftrace_graph_hash pointer with __rcu
  bootconfig: Only load bootconfig if "bootconfig" is on the kernel cmdline
  tracing: Use seq_buf for building dynevent_cmd string
  tracing: Remove useless code in dynevent_arg_pair_add()
  tracing: Remove check_arg() callbacks from dynevent args
  tracing: Consolidate some synth_event_trace code
  tracing: Fix now invalid var_ref_vals assumption in trace action
  tracing: Change trace_boot to use synth_event interface
  tracing: Move tracing selftests to bottom of menu
  tracing: Move mmio tracer config up with the other tracers
  tracing: Move tracing test module configs together
  tracing: Move all function tracing configs together
  tracing: Documentation for in-kernel synthetic event API
  ...
Linus Torvalds committed 2020-02-06 07:12:11 +00:00
90 changed files with 6491 additions and 837 deletions

kernel/events/core.c

@@ -4373,7 +4373,7 @@ static void free_event_rcu(struct rcu_head *head)
}
static void ring_buffer_attach(struct perf_event *event,
struct ring_buffer *rb);
struct perf_buffer *rb);
static void detach_sb_event(struct perf_event *event)
{
@@ -5054,7 +5054,7 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
static __poll_t perf_poll(struct file *file, poll_table *wait)
{
struct perf_event *event = file->private_data;
struct ring_buffer *rb;
struct perf_buffer *rb;
__poll_t events = EPOLLHUP;
poll_wait(file, &event->waitq, wait);
@@ -5296,7 +5296,7 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned lon
return perf_event_set_bpf_prog(event, arg);
case PERF_EVENT_IOC_PAUSE_OUTPUT: {
struct ring_buffer *rb;
struct perf_buffer *rb;
rcu_read_lock();
rb = rcu_dereference(event->rb);
@@ -5432,7 +5432,7 @@ static void calc_timer_values(struct perf_event *event,
static void perf_event_init_userpage(struct perf_event *event)
{
struct perf_event_mmap_page *userpg;
struct ring_buffer *rb;
struct perf_buffer *rb;
rcu_read_lock();
rb = rcu_dereference(event->rb);
@@ -5464,7 +5464,7 @@ void __weak arch_perf_update_userpage(
void perf_event_update_userpage(struct perf_event *event)
{
struct perf_event_mmap_page *userpg;
struct ring_buffer *rb;
struct perf_buffer *rb;
u64 enabled, running, now;
rcu_read_lock();
@@ -5515,7 +5515,7 @@ EXPORT_SYMBOL_GPL(perf_event_update_userpage);
static vm_fault_t perf_mmap_fault(struct vm_fault *vmf)
{
struct perf_event *event = vmf->vma->vm_file->private_data;
struct ring_buffer *rb;
struct perf_buffer *rb;
vm_fault_t ret = VM_FAULT_SIGBUS;
if (vmf->flags & FAULT_FLAG_MKWRITE) {
@@ -5548,9 +5548,9 @@ unlock:
}
static void ring_buffer_attach(struct perf_event *event,
struct ring_buffer *rb)
struct perf_buffer *rb)
{
struct ring_buffer *old_rb = NULL;
struct perf_buffer *old_rb = NULL;
unsigned long flags;
if (event->rb) {
@@ -5608,7 +5608,7 @@ static void ring_buffer_attach(struct perf_event *event,
static void ring_buffer_wakeup(struct perf_event *event)
{
struct ring_buffer *rb;
struct perf_buffer *rb;
rcu_read_lock();
rb = rcu_dereference(event->rb);
@@ -5619,9 +5619,9 @@ static void ring_buffer_wakeup(struct perf_event *event)
rcu_read_unlock();
}
struct ring_buffer *ring_buffer_get(struct perf_event *event)
struct perf_buffer *ring_buffer_get(struct perf_event *event)
{
struct ring_buffer *rb;
struct perf_buffer *rb;
rcu_read_lock();
rb = rcu_dereference(event->rb);
@@ -5634,7 +5634,7 @@ struct ring_buffer *ring_buffer_get(struct perf_event *event)
return rb;
}
void ring_buffer_put(struct ring_buffer *rb)
void ring_buffer_put(struct perf_buffer *rb)
{
if (!refcount_dec_and_test(&rb->refcount))
return;
@@ -5672,7 +5672,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
{
struct perf_event *event = vma->vm_file->private_data;
struct ring_buffer *rb = ring_buffer_get(event);
struct perf_buffer *rb = ring_buffer_get(event);
struct user_struct *mmap_user = rb->mmap_user;
int mmap_locked = rb->mmap_locked;
unsigned long size = perf_data_size(rb);
@@ -5790,8 +5790,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
struct perf_event *event = file->private_data;
unsigned long user_locked, user_lock_limit;
struct user_struct *user = current_user();
struct perf_buffer *rb = NULL;
unsigned long locked, lock_limit;
struct ring_buffer *rb = NULL;
unsigned long vma_size;
unsigned long nr_pages;
long user_extra = 0, extra = 0;
@@ -6266,7 +6266,7 @@ static unsigned long perf_prepare_sample_aux(struct perf_event *event,
size_t size)
{
struct perf_event *sampler = event->aux_event;
struct ring_buffer *rb;
struct perf_buffer *rb;
data->aux_size = 0;
@@ -6299,7 +6299,7 @@ out:
return data->aux_size;
}
long perf_pmu_snapshot_aux(struct ring_buffer *rb,
long perf_pmu_snapshot_aux(struct perf_buffer *rb,
struct perf_event *event,
struct perf_output_handle *handle,
unsigned long size)
@@ -6338,8 +6338,8 @@ static void perf_aux_sample_output(struct perf_event *event,
struct perf_sample_data *data)
{
struct perf_event *sampler = event->aux_event;
struct perf_buffer *rb;
unsigned long pad;
struct ring_buffer *rb;
long size;
if (WARN_ON_ONCE(!sampler || !data->aux_size))
@@ -6707,7 +6707,7 @@ void perf_output_sample(struct perf_output_handle *handle,
int wakeup_events = event->attr.wakeup_events;
if (wakeup_events) {
struct ring_buffer *rb = handle->rb;
struct perf_buffer *rb = handle->rb;
int events = local_inc_return(&rb->events);
if (events >= wakeup_events) {
@@ -7150,7 +7150,7 @@ void perf_event_exec(void)
}
struct remote_output {
struct ring_buffer *rb;
struct perf_buffer *rb;
int err;
};
@@ -7158,7 +7158,7 @@ static void __perf_event_output_stop(struct perf_event *event, void *data)
{
struct perf_event *parent = event->parent;
struct remote_output *ro = data;
struct ring_buffer *rb = ro->rb;
struct perf_buffer *rb = ro->rb;
struct stop_event_data sd = {
.event = event,
};
@@ -10998,7 +10998,7 @@ err_size:
static int
perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{
struct ring_buffer *rb = NULL;
struct perf_buffer *rb = NULL;
int ret = -EINVAL;
if (!output_event)

kernel/events/internal.h

@@ -10,7 +10,7 @@
#define RING_BUFFER_WRITABLE 0x01
struct ring_buffer {
struct perf_buffer {
refcount_t refcount;
struct rcu_head rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
@@ -58,17 +58,17 @@ struct ring_buffer {
void *data_pages[0];
};
extern void rb_free(struct ring_buffer *rb);
extern void rb_free(struct perf_buffer *rb);
static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
struct ring_buffer *rb;
struct perf_buffer *rb;
rb = container_of(rcu_head, struct ring_buffer, rcu_head);
rb = container_of(rcu_head, struct perf_buffer, rcu_head);
rb_free(rb);
}
static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
static inline void rb_toggle_paused(struct perf_buffer *rb, bool pause)
{
if (!pause && rb->nr_pages)
rb->paused = 0;
@@ -76,16 +76,16 @@ static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
rb->paused = 1;
}
extern struct ring_buffer *
extern struct perf_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
extern int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);
extern void rb_free_aux(struct perf_buffer *rb);
extern struct perf_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct perf_buffer *rb);
static inline bool rb_has_aux(struct ring_buffer *rb)
static inline bool rb_has_aux(struct perf_buffer *rb)
{
return !!rb->aux_nr_pages;
}
@@ -94,7 +94,7 @@ void perf_event_aux_event(struct perf_event *event, unsigned long head,
unsigned long size, u64 flags);
extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff);
#ifdef CONFIG_PERF_USE_VMALLOC
/*
@@ -103,25 +103,25 @@ perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
* Required for architectures that have d-cache aliasing issues.
*/
static inline int page_order(struct ring_buffer *rb)
static inline int page_order(struct perf_buffer *rb)
{
return rb->page_order;
}
#else
static inline int page_order(struct ring_buffer *rb)
static inline int page_order(struct perf_buffer *rb)
{
return 0;
}
#endif
static inline unsigned long perf_data_size(struct ring_buffer *rb)
static inline unsigned long perf_data_size(struct perf_buffer *rb)
{
return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}
static inline unsigned long perf_aux_size(struct ring_buffer *rb)
static inline unsigned long perf_aux_size(struct perf_buffer *rb)
{
return rb->aux_nr_pages << PAGE_SHIFT;
}
@@ -141,7 +141,7 @@ static inline unsigned long perf_aux_size(struct ring_buffer *rb)
buf += written; \
handle->size -= written; \
if (!handle->size) { \
struct ring_buffer *rb = handle->rb; \
struct perf_buffer *rb = handle->rb; \
\
handle->page++; \
handle->page &= rb->nr_pages - 1; \

kernel/events/ring_buffer.c

@@ -35,7 +35,7 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
*/
static void perf_output_get_handle(struct perf_output_handle *handle)
{
struct ring_buffer *rb = handle->rb;
struct perf_buffer *rb = handle->rb;
preempt_disable();
@@ -49,7 +49,7 @@ static void perf_output_get_handle(struct perf_output_handle *handle)
static void perf_output_put_handle(struct perf_output_handle *handle)
{
struct ring_buffer *rb = handle->rb;
struct perf_buffer *rb = handle->rb;
unsigned long head;
unsigned int nest;
@@ -150,7 +150,7 @@ __perf_output_begin(struct perf_output_handle *handle,
struct perf_event *event, unsigned int size,
bool backward)
{
struct ring_buffer *rb;
struct perf_buffer *rb;
unsigned long tail, offset, head;
int have_lost, page_shift;
struct {
@@ -301,7 +301,7 @@ void perf_output_end(struct perf_output_handle *handle)
}
static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
ring_buffer_init(struct perf_buffer *rb, long watermark, int flags)
{
long max_size = perf_data_size(rb);
@@ -361,7 +361,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
{
struct perf_event *output_event = event;
unsigned long aux_head, aux_tail;
struct ring_buffer *rb;
struct perf_buffer *rb;
unsigned int nest;
if (output_event->parent)
@@ -449,7 +449,7 @@ err:
}
EXPORT_SYMBOL_GPL(perf_aux_output_begin);
static __always_inline bool rb_need_aux_wakeup(struct ring_buffer *rb)
static __always_inline bool rb_need_aux_wakeup(struct perf_buffer *rb)
{
if (rb->aux_overwrite)
return false;
@@ -475,7 +475,7 @@ static __always_inline bool rb_need_aux_wakeup(struct ring_buffer *rb)
void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
{
bool wakeup = !!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED);
struct ring_buffer *rb = handle->rb;
struct perf_buffer *rb = handle->rb;
unsigned long aux_head;
/* in overwrite mode, driver provides aux_head via handle */
@@ -532,7 +532,7 @@ EXPORT_SYMBOL_GPL(perf_aux_output_end);
*/
int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
{
struct ring_buffer *rb = handle->rb;
struct perf_buffer *rb = handle->rb;
if (size > handle->size)
return -ENOSPC;
@@ -569,8 +569,8 @@ long perf_output_copy_aux(struct perf_output_handle *aux_handle,
struct perf_output_handle *handle,
unsigned long from, unsigned long to)
{
struct perf_buffer *rb = aux_handle->rb;
unsigned long tocopy, remainder, len = 0;
struct ring_buffer *rb = aux_handle->rb;
void *addr;
from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;
@@ -626,7 +626,7 @@ static struct page *rb_alloc_aux_page(int node, int order)
return page;
}
static void rb_free_aux_page(struct ring_buffer *rb, int idx)
static void rb_free_aux_page(struct perf_buffer *rb, int idx)
{
struct page *page = virt_to_page(rb->aux_pages[idx]);
@@ -635,7 +635,7 @@ static void rb_free_aux_page(struct ring_buffer *rb, int idx)
__free_page(page);
}
static void __rb_free_aux(struct ring_buffer *rb)
static void __rb_free_aux(struct perf_buffer *rb)
{
int pg;
@@ -662,7 +662,7 @@ static void __rb_free_aux(struct ring_buffer *rb)
}
}
int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
bool overwrite = !(flags & RING_BUFFER_WRITABLE);
@@ -753,7 +753,7 @@ out:
return ret;
}
void rb_free_aux(struct ring_buffer *rb)
void rb_free_aux(struct perf_buffer *rb)
{
if (refcount_dec_and_test(&rb->aux_refcount))
__rb_free_aux(rb);
@@ -766,7 +766,7 @@ void rb_free_aux(struct ring_buffer *rb)
*/
static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
__perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
{
if (pgoff > rb->nr_pages)
return NULL;
@@ -798,13 +798,13 @@ static void perf_mmap_free_page(void *addr)
__free_page(page);
}
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
struct ring_buffer *rb;
struct perf_buffer *rb;
unsigned long size;
int i;
size = sizeof(struct ring_buffer);
size = sizeof(struct perf_buffer);
size += nr_pages * sizeof(void *);
if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
@@ -843,7 +843,7 @@ fail:
return NULL;
}
void rb_free(struct ring_buffer *rb)
void rb_free(struct perf_buffer *rb)
{
int i;
@@ -854,13 +854,13 @@ void rb_free(struct ring_buffer *rb)
}
#else
static int data_page_nr(struct ring_buffer *rb)
static int data_page_nr(struct perf_buffer *rb)
{
return rb->nr_pages << page_order(rb);
}
static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
__perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
{
/* The '>' counts in the user page. */
if (pgoff > data_page_nr(rb))
@@ -878,11 +878,11 @@ static void perf_mmap_unmark_page(void *addr)
static void rb_free_work(struct work_struct *work)
{
struct ring_buffer *rb;
struct perf_buffer *rb;
void *base;
int i, nr;
rb = container_of(work, struct ring_buffer, work);
rb = container_of(work, struct perf_buffer, work);
nr = data_page_nr(rb);
base = rb->user_page;
@@ -894,18 +894,18 @@ static void rb_free_work(struct work_struct *work)
kfree(rb);
}
void rb_free(struct ring_buffer *rb)
void rb_free(struct perf_buffer *rb)
{
schedule_work(&rb->work);
}
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
struct ring_buffer *rb;
struct perf_buffer *rb;
unsigned long size;
void *all_buf;
size = sizeof(struct ring_buffer);
size = sizeof(struct perf_buffer);
size += sizeof(void *);
rb = kzalloc(size, GFP_KERNEL);
@@ -939,7 +939,7 @@ fail:
#endif
struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
{
if (rb->aux_nr_pages) {
/* above AUX space */

kernel/trace/Kconfig

@@ -141,6 +141,15 @@ menuconfig FTRACE
if FTRACE
config BOOTTIME_TRACING
bool "Boot-time Tracing support"
depends on BOOT_CONFIG && TRACING
default y
help
Enable developer to setup ftrace subsystem via supplemental
kernel cmdline at boot time for debugging (tracing) driver
initialization and boot process.
config FUNCTION_TRACER
bool "Kernel Function Tracer"
depends on HAVE_FUNCTION_TRACER
@@ -172,6 +181,77 @@ config FUNCTION_GRAPH_TRACER
the return value. This is done by setting the current return
address on the current task structure into a stack of calls.
config DYNAMIC_FTRACE
bool "enable/disable function tracing dynamically"
depends on FUNCTION_TRACER
depends on HAVE_DYNAMIC_FTRACE
default y
help
This option will modify all the calls to function tracing
dynamically (will patch them out of the binary image and
replace them with a No-Op instruction) on boot up. During
compile time, a table is made of all the locations that ftrace
can function trace, and this table is linked into the kernel
image. When this is enabled, functions can be individually
enabled, and the functions not enabled will not affect
performance of the system.
See the files in /sys/kernel/debug/tracing:
available_filter_functions
set_ftrace_filter
set_ftrace_notrace
This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
otherwise has native performance as long as no tracing is active.
config DYNAMIC_FTRACE_WITH_REGS
def_bool y
depends on DYNAMIC_FTRACE
depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
def_bool y
depends on DYNAMIC_FTRACE
depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
config FUNCTION_PROFILER
bool "Kernel function profiler"
depends on FUNCTION_TRACER
default n
help
This option enables the kernel function profiler. A file is created
in debugfs called function_profile_enabled which defaults to zero.
When a 1 is echoed into this file profiling begins, and when a
zero is entered, profiling stops. A "functions" file is created in
the trace_stat directory; this file shows the list of functions that
have been hit and their counters.
If in doubt, say N.
config STACK_TRACER
bool "Trace max stack"
depends on HAVE_FUNCTION_TRACER
select FUNCTION_TRACER
select STACKTRACE
select KALLSYMS
help
This special tracer records the maximum stack footprint of the
kernel and displays it in /sys/kernel/debug/tracing/stack_trace.
This tracer works by hooking into every function call that the
kernel executes, and keeping a maximum stack depth value and
stack-trace saved. If this is configured with DYNAMIC_FTRACE
then it will not have any overhead while the stack tracer
is disabled.
To enable the stack tracer on bootup, pass in 'stacktrace'
on the kernel command line.
The stack tracer can also be enabled or disabled via the
sysctl kernel.stack_tracer_enabled
Say N if unsure.
config TRACE_PREEMPT_TOGGLE
bool
help
@@ -282,6 +362,19 @@ config HWLAT_TRACER
file. Every time a latency is greater than tracing_thresh, it will
be recorded into the ring buffer.
config MMIOTRACE
bool "Memory mapped IO tracing"
depends on HAVE_MMIOTRACE_SUPPORT && PCI
select GENERIC_TRACER
help
Mmiotrace traces Memory Mapped I/O access and is meant for
debugging and reverse engineering. It is called from the ioremap
implementation and works via page faults. Tracing is disabled by
default and can be enabled at run-time.
See Documentation/trace/mmiotrace.rst.
If you are not helping to develop drivers, say N.
config ENABLE_DEFAULT_TRACERS
bool "Trace process context switches and events"
depends on !GENERIC_TRACER
@@ -410,30 +503,6 @@ config BRANCH_TRACER
Say N if unsure.
config STACK_TRACER
bool "Trace max stack"
depends on HAVE_FUNCTION_TRACER
select FUNCTION_TRACER
select STACKTRACE
select KALLSYMS
help
This special tracer records the maximum stack footprint of the
kernel and displays it in /sys/kernel/debug/tracing/stack_trace.
This tracer works by hooking into every function call that the
kernel executes, and keeping a maximum stack depth value and
stack-trace saved. If this is configured with DYNAMIC_FTRACE
then it will not have any overhead while the stack tracer
is disabled.
To enable the stack tracer on bootup, pass in 'stacktrace'
on the kernel command line.
The stack tracer can also be enabled or disabled via the
sysctl kernel.stack_tracer_enabled
Say N if unsure.
config BLK_DEV_IO_TRACE
bool "Support for tracing block IO actions"
depends on SYSFS
@@ -531,53 +600,6 @@ config DYNAMIC_EVENTS
config PROBE_EVENTS
def_bool n
config DYNAMIC_FTRACE
bool "enable/disable function tracing dynamically"
depends on FUNCTION_TRACER
depends on HAVE_DYNAMIC_FTRACE
default y
help
This option will modify all the calls to function tracing
dynamically (will patch them out of the binary image and
replace them with a No-Op instruction) on boot up. During
compile time, a table is made of all the locations that ftrace
can function trace, and this table is linked into the kernel
image. When this is enabled, functions can be individually
enabled, and the functions not enabled will not affect
performance of the system.
See the files in /sys/kernel/debug/tracing:
available_filter_functions
set_ftrace_filter
set_ftrace_notrace
This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
otherwise has native performance as long as no tracing is active.
config DYNAMIC_FTRACE_WITH_REGS
def_bool y
depends on DYNAMIC_FTRACE
depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
def_bool y
depends on DYNAMIC_FTRACE
depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
config FUNCTION_PROFILER
bool "Kernel function profiler"
depends on FUNCTION_TRACER
default n
help
This option enables the kernel function profiler. A file is created
in debugfs called function_profile_enabled which defaults to zero.
When a 1 is echoed into this file profiling begins, and when a
zero is entered, profiling stops. A "functions" file is created in
the trace_stat directory; this file shows the list of functions that
have been hit and their counters.
If in doubt, say N.
config BPF_KPROBE_OVERRIDE
bool "Enable BPF programs to override a kprobed function"
depends on BPF_EVENTS
@@ -592,54 +614,6 @@ config FTRACE_MCOUNT_RECORD
depends on DYNAMIC_FTRACE
depends on HAVE_FTRACE_MCOUNT_RECORD
config FTRACE_SELFTEST
bool
config FTRACE_STARTUP_TEST
bool "Perform a startup test on ftrace"
depends on GENERIC_TRACER
select FTRACE_SELFTEST
help
This option performs a series of startup tests on ftrace. On bootup
a series of tests are made to verify that the tracer is
functioning properly. It will do tests on all the configured
tracers of ftrace.
config EVENT_TRACE_STARTUP_TEST
bool "Run selftest on trace events"
depends on FTRACE_STARTUP_TEST
default y
help
This option performs a test on all trace events in the system.
It basically just enables each event and runs some code that
will trigger events (not necessarily the event it enables)
This may take some time run as there are a lot of events.
config EVENT_TRACE_TEST_SYSCALLS
bool "Run selftest on syscall events"
depends on EVENT_TRACE_STARTUP_TEST
help
This option will also enable testing every syscall event.
It only enables the event and disables it and runs various loads
with the event enabled. This adds a bit more time for kernel boot
up since it runs this on every system call defined.
TBD - enable a way to actually call the syscalls as we test their
events
config MMIOTRACE
bool "Memory mapped IO tracing"
depends on HAVE_MMIOTRACE_SUPPORT && PCI
select GENERIC_TRACER
help
Mmiotrace traces Memory Mapped I/O access and is meant for
debugging and reverse engineering. It is called from the ioremap
implementation and works via page faults. Tracing is disabled by
default and can be enabled at run-time.
See Documentation/trace/mmiotrace.rst.
If you are not helping to develop drivers, say N.
config TRACING_MAP
bool
depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -680,16 +654,6 @@ config TRACE_EVENT_INJECT
If unsure, say N.
config MMIOTRACE_TEST
tristate "Test module for mmiotrace"
depends on MMIOTRACE && m
help
This is a dumb module for testing mmiotrace. It is very dangerous
as it will write garbage to IO memory starting at a given address.
However, it should be safe to use on e.g. unused portion of VRAM.
Say N, unless you absolutely know what you are doing.
config TRACEPOINT_BENCHMARK
bool "Add tracepoint that benchmarks tracepoints"
help
@@ -736,44 +700,6 @@ config RING_BUFFER_BENCHMARK
If unsure, say N.
config RING_BUFFER_STARTUP_TEST
bool "Ring buffer startup self test"
depends on RING_BUFFER
help
Run a simple self test on the ring buffer on boot up. Late in the
kernel boot sequence, the test will start that kicks off
a thread per cpu. Each thread will write various size events
into the ring buffer. Another thread is created to send IPIs
to each of the threads, where the IPI handler will also write
to the ring buffer, to test/stress the nesting ability.
If any anomalies are discovered, a warning will be displayed
and all ring buffers will be disabled.
The test runs for 10 seconds. This will slow your boot time
by at least 10 more seconds.
At the end of the test, statics and more checks are done.
It will output the stats of each per cpu buffer. What
was written, the sizes, what was read, what was lost, and
other similar details.
If unsure, say N
config PREEMPTIRQ_DELAY_TEST
tristate "Preempt / IRQ disable delay thread to test latency tracers"
depends on m
help
Select this option to build a test module that can help test latency
tracers by executing a preempt or irq disable section with a user
configurable delay. The module busy waits for the duration of the
critical section.
For example, the following invocation generates a burst of three
irq-disabled critical sections for 500us:
modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3
If unsure, say N
config TRACE_EVAL_MAP_FILE
bool "Show eval mappings for trace events"
depends on TRACING
@@ -814,6 +740,114 @@ config GCOV_PROFILE_FTRACE
Note that on a kernel compiled with this config, ftrace will
run significantly slower.
config FTRACE_SELFTEST
bool
config FTRACE_STARTUP_TEST
bool "Perform a startup test on ftrace"
depends on GENERIC_TRACER
select FTRACE_SELFTEST
help
This option performs a series of startup tests on ftrace. On bootup
a series of tests are made to verify that the tracer is
functioning properly. It will do tests on all the configured
tracers of ftrace.
config EVENT_TRACE_STARTUP_TEST
bool "Run selftest on trace events"
depends on FTRACE_STARTUP_TEST
default y
help
This option performs a test on all trace events in the system.
It basically just enables each event and runs some code that
will trigger events (not necessarily the event it enables)
This may take some time run as there are a lot of events.
config EVENT_TRACE_TEST_SYSCALLS
bool "Run selftest on syscall events"
depends on EVENT_TRACE_STARTUP_TEST
help
This option will also enable testing every syscall event.
It only enables the event and disables it and runs various loads
with the event enabled. This adds a bit more time for kernel boot
up since it runs this on every system call defined.
TBD - enable a way to actually call the syscalls as we test their
events
config RING_BUFFER_STARTUP_TEST
bool "Ring buffer startup self test"
depends on RING_BUFFER
help
Run a simple self test on the ring buffer on boot up. Late in the
kernel boot sequence, the test will start that kicks off
a thread per cpu. Each thread will write various size events
into the ring buffer. Another thread is created to send IPIs
to each of the threads, where the IPI handler will also write
to the ring buffer, to test/stress the nesting ability.
If any anomalies are discovered, a warning will be displayed
and all ring buffers will be disabled.
The test runs for 10 seconds. This will slow your boot time
by at least 10 more seconds.
At the end of the test, statics and more checks are done.
It will output the stats of each per cpu buffer. What
was written, the sizes, what was read, what was lost, and
other similar details.
If unsure, say N
config MMIOTRACE_TEST
tristate "Test module for mmiotrace"
depends on MMIOTRACE && m
help
This is a dumb module for testing mmiotrace. It is very dangerous
as it will write garbage to IO memory starting at a given address.
However, it should be safe to use on e.g. unused portion of VRAM.
Say N, unless you absolutely know what you are doing.
config PREEMPTIRQ_DELAY_TEST
tristate "Test module to create a preempt / IRQ disable delay thread to test latency tracers"
depends on m
help
Select this option to build a test module that can help test latency
tracers by executing a preempt or irq disable section with a user
configurable delay. The module busy waits for the duration of the
critical section.
For example, the following invocation generates a burst of three
irq-disabled critical sections for 500us:
modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3
If unsure, say N
config SYNTH_EVENT_GEN_TEST
tristate "Test module for in-kernel synthetic event generation"
depends on HIST_TRIGGERS
help
This option creates a test module to check the base
functionality of in-kernel synthetic event definition and
generation.
To test, insert the module, and then check the trace buffer
for the generated sample events.
If unsure, say N.
config KPROBE_EVENT_GEN_TEST
tristate "Test module for in-kernel kprobe event generation"
depends on KPROBE_EVENTS
help
This option creates a test module to check the base
functionality of in-kernel kprobe event definition.
To test, insert the module, and then check the trace buffer
for the generated kprobe events.
If unsure, say N.
endif # FTRACE
endif # TRACING_SUPPORT

kernel/trace/Makefile

@@ -44,6 +44,8 @@ obj-$(CONFIG_TRACING) += trace_stat.o
obj-$(CONFIG_TRACING) += trace_printk.o
obj-$(CONFIG_TRACING_MAP) += tracing_map.o
obj-$(CONFIG_PREEMPTIRQ_DELAY_TEST) += preemptirq_delay_test.o
obj-$(CONFIG_SYNTH_EVENT_GEN_TEST) += synth_event_gen_test.o
obj-$(CONFIG_KPROBE_EVENT_GEN_TEST) += kprobe_event_gen_test.o
obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
obj-$(CONFIG_PREEMPTIRQ_TRACEPOINTS) += trace_preemptirq.o
@@ -83,6 +85,7 @@ endif
obj-$(CONFIG_DYNAMIC_EVENTS) += trace_dynevent.o
obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
obj-$(CONFIG_UPROBE_EVENTS) += trace_uprobe.o
obj-$(CONFIG_BOOTTIME_TRACING) += trace_boot.o
obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o

kernel/trace/blktrace.c

@@ -68,14 +68,14 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
{
struct blk_io_trace *t;
struct ring_buffer_event *event = NULL;
struct ring_buffer *buffer = NULL;
struct trace_buffer *buffer = NULL;
int pc = 0;
int cpu = smp_processor_id();
bool blk_tracer = blk_tracer_enabled;
ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
if (blk_tracer) {
buffer = blk_tr->trace_buffer.buffer;
buffer = blk_tr->array_buffer.buffer;
pc = preempt_count();
event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
sizeof(*t) + len + cgid_len,
@@ -215,7 +215,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
{
struct task_struct *tsk = current;
struct ring_buffer_event *event = NULL;
struct ring_buffer *buffer = NULL;
struct trace_buffer *buffer = NULL;
struct blk_io_trace *t;
unsigned long flags = 0;
unsigned long *sequence;
@@ -248,7 +248,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
if (blk_tracer) {
tracing_record_cmdline(current);
buffer = blk_tr->trace_buffer.buffer;
buffer = blk_tr->array_buffer.buffer;
pc = preempt_count();
event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
sizeof(*t) + pdu_len + cgid_len,

kernel/trace/ftrace.c

@@ -62,8 +62,6 @@
})
/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12
@@ -146,7 +144,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
{
struct trace_array *tr = op->private;
if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid))
if (tr && this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid))
return;
op->saved_func(ip, parent_ip, op, regs);
@@ -1103,9 +1101,6 @@ struct ftrace_page {
#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
/* estimate from running different kernels */
#define NR_TO_INIT 10000
static struct ftrace_page *ftrace_pages_start;
static struct ftrace_page *ftrace_pages;
@@ -5464,7 +5459,7 @@ static void __init set_ftrace_early_graph(char *buf, int enable)
struct ftrace_hash *hash;
hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
if (WARN_ON(!hash))
if (MEM_FAIL(!hash, "Failed to allocate hash\n"))
return;
while (buf) {
@@ -5596,8 +5591,8 @@ static const struct file_operations ftrace_notrace_fops = {
static DEFINE_MUTEX(graph_lock);
struct ftrace_hash *ftrace_graph_hash = EMPTY_HASH;
struct ftrace_hash *ftrace_graph_notrace_hash = EMPTY_HASH;
struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;
enum graph_filter_type {
GRAPH_FILTER_NOTRACE = 0,
@@ -5872,8 +5867,15 @@ ftrace_graph_release(struct inode *inode, struct file *file)
mutex_unlock(&graph_lock);
/* Wait till all users are no longer using the old hash */
synchronize_rcu();
/*
* We need to do a hard force of sched synchronization.
* This is because we use preempt_disable() to do RCU, but
* the function tracers can be called where RCU is not watching
* (like before user_exit()). We can not rely on the RCU
* infrastructure to do the synchronization, thus we must do it
* ourselves.
*/
schedule_on_each_cpu(ftrace_sync);
free_ftrace_hash(old_hash);
}
@@ -6596,7 +6598,7 @@ static void add_to_clear_hash_list(struct list_head *clear_list,
func = kmalloc(sizeof(*func), GFP_KERNEL);
if (!func) {
WARN_ONCE(1, "alloc failure, ftrace filter could be stale\n");
MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n");
return;
}
@@ -6922,7 +6924,7 @@ ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
pid_list = rcu_dereference_sched(tr->function_pids);
this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
trace_ignore_this_task(pid_list, next));
}
@@ -6976,7 +6978,7 @@ static void clear_ftrace_pids(struct trace_array *tr)
unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
for_each_possible_cpu(cpu)
per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false;
per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = false;
rcu_assign_pointer(tr->function_pids, NULL);
@@ -7031,9 +7033,10 @@ static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
struct trace_array *tr = m->private;
struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
if (v == FTRACE_NO_PIDS)
if (v == FTRACE_NO_PIDS) {
(*pos)++;
return NULL;
}
return trace_pid_next(pid_list, v, pos);
}
@@ -7100,7 +7103,7 @@ static void ignore_task_cpu(void *data)
pid_list = rcu_dereference_protected(tr->function_pids,
mutex_is_locked(&ftrace_lock));
this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
trace_ignore_this_task(pid_list, current));
}

kernel/trace/kprobe_event_gen_test.c (new file)

@@ -0,0 +1,225 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Test module for in-kernel kprobe event creation and generation.
*
* Copyright (C) 2019 Tom Zanussi <zanussi@kernel.org>
*/
#include <linux/module.h>
#include <linux/trace_events.h>
/*
* This module is a simple test of basic functionality for in-kernel
* kprobe/kretprobe event creation. The first test uses
* kprobe_event_gen_cmd_start(), kprobe_event_add_fields() and
* kprobe_event_gen_cmd_end() to create a kprobe event, which is then
* enabled in order to generate trace output. The second creates a
* kretprobe event using kretprobe_event_gen_cmd_start() and
* kretprobe_event_gen_cmd_end(), and is also then enabled.
*
* To test, select CONFIG_KPROBE_EVENT_GEN_TEST and build the module.
* Then:
*
* # insmod kernel/trace/kprobe_event_gen_test.ko
* # cat /sys/kernel/debug/tracing/trace
*
* You should see many instances of the "gen_kprobe_test" and
* "gen_kretprobe_test" events in the trace buffer.
*
* To remove the events, remove the module:
*
* # rmmod kprobe_event_gen_test
*
*/
static struct trace_event_file *gen_kprobe_test;
static struct trace_event_file *gen_kretprobe_test;
/*
* Test to make sure we can create a kprobe event, then add more
* fields.
*/
static int __init test_gen_kprobe_cmd(void)
{
struct dynevent_cmd cmd;
char *buf;
int ret;
/* Create a buffer to hold the generated command */
buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/* Before generating the command, initialize the cmd object */
kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
/*
* Define the gen_kprobe_test event with the first 2 kprobe
* fields.
*/
ret = kprobe_event_gen_cmd_start(&cmd, "gen_kprobe_test",
"do_sys_open",
"dfd=%ax", "filename=%dx");
if (ret)
goto free;
/* Use kprobe_event_add_fields to add the rest of the fields */
ret = kprobe_event_add_fields(&cmd, "flags=%cx", "mode=+4($stack)");
if (ret)
goto free;
/*
* This actually creates the event.
*/
ret = kprobe_event_gen_cmd_end(&cmd);
if (ret)
goto free;
/*
* Now get the gen_kprobe_test event file. We need to prevent
* the instance and event from disappearing from underneath
* us, which trace_get_event_file() does (though in this case
* we're using the top-level instance which never goes away).
*/
gen_kprobe_test = trace_get_event_file(NULL, "kprobes",
"gen_kprobe_test");
if (IS_ERR(gen_kprobe_test)) {
ret = PTR_ERR(gen_kprobe_test);
goto delete;
}
/* Enable the event or you won't see anything */
ret = trace_array_set_clr_event(gen_kprobe_test->tr,
"kprobes", "gen_kprobe_test", true);
if (ret) {
trace_put_event_file(gen_kprobe_test);
goto delete;
}
out:
return ret;
delete:
/* We got an error after creating the event, delete it */
ret = kprobe_event_delete("gen_kprobe_test");
free:
kfree(buf);
goto out;
}
/*
* Test to make sure we can create a kretprobe event.
*/
static int __init test_gen_kretprobe_cmd(void)
{
struct dynevent_cmd cmd;
char *buf;
int ret;
/* Create a buffer to hold the generated command */
buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/* Before generating the command, initialize the cmd object */
kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
/*
* Define the kretprobe event.
*/
ret = kretprobe_event_gen_cmd_start(&cmd, "gen_kretprobe_test",
"do_sys_open",
"$retval");
if (ret)
goto free;
/*
* This actually creates the event.
*/
ret = kretprobe_event_gen_cmd_end(&cmd);
if (ret)
goto free;
/*
* Now get the gen_kretprobe_test event file. We need to
* prevent the instance and event from disappearing from
* underneath us, which trace_get_event_file() does (though in
* this case we're using the top-level instance which never
* goes away).
*/
gen_kretprobe_test = trace_get_event_file(NULL, "kprobes",
"gen_kretprobe_test");
if (IS_ERR(gen_kretprobe_test)) {
ret = PTR_ERR(gen_kretprobe_test);
goto delete;
}
/* Enable the event or you won't see anything */
ret = trace_array_set_clr_event(gen_kretprobe_test->tr,
"kprobes", "gen_kretprobe_test", true);
if (ret) {
trace_put_event_file(gen_kretprobe_test);
goto delete;
}
out:
return ret;
delete:
/* We got an error after creating the event, delete it */
ret = kprobe_event_delete("gen_kretprobe_test");
free:
kfree(buf);
goto out;
}
static int __init kprobe_event_gen_test_init(void)
{
int ret;
ret = test_gen_kprobe_cmd();
if (ret)
return ret;
ret = test_gen_kretprobe_cmd();
if (ret) {
WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
"kprobes",
"gen_kretprobe_test", false));
trace_put_event_file(gen_kretprobe_test);
WARN_ON(kprobe_event_delete("gen_kretprobe_test"));
}
return ret;
}
static void __exit kprobe_event_gen_test_exit(void)
{
/* Disable the event or you can't remove it */
WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr,
"kprobes",
"gen_kprobe_test", false));
/* Now give the file and instance back */
trace_put_event_file(gen_kprobe_test);
/* Now unregister and free the event */
WARN_ON(kprobe_event_delete("gen_kprobe_test"));
/* Disable the event or you can't remove it */
WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr,
"kprobes",
"gen_kretprobe_test", false));
/* Now give the file and instance back */
trace_put_event_file(gen_kretprobe_test);
/* Now unregister and free the event */
WARN_ON(kprobe_event_delete("gen_kretprobe_test"));
}
module_init(kprobe_event_gen_test_init)
module_exit(kprobe_event_gen_test_exit)
MODULE_AUTHOR("Tom Zanussi");
MODULE_DESCRIPTION("kprobe event generation test");
MODULE_LICENSE("GPL v2");

kernel/trace/ring_buffer.c

@@ -300,8 +300,6 @@ u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event)
/* Missed count stored at end */
#define RB_MISSED_STORED (1 << 30)
#define RB_MISSED_FLAGS (RB_MISSED_EVENTS|RB_MISSED_STORED)
struct buffer_data_page {
u64 time_stamp; /* page time stamp */
local_t commit; /* write committed index */
@@ -443,7 +441,7 @@ enum {
struct ring_buffer_per_cpu {
int cpu;
atomic_t record_disabled;
struct ring_buffer *buffer;
struct trace_buffer *buffer;
raw_spinlock_t reader_lock; /* serialize readers */
arch_spinlock_t lock;
struct lock_class_key lock_key;
@@ -482,7 +480,7 @@ struct ring_buffer_per_cpu {
struct rb_irq_work irq_work;
};
struct ring_buffer {
struct trace_buffer {
unsigned flags;
int cpus;
atomic_t record_disabled;
@@ -518,7 +516,7 @@ struct ring_buffer_iter {
*
* Returns the number of pages used by a per_cpu buffer of the ring buffer.
*/
size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu)
size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
{
return buffer->buffers[cpu]->nr_pages;
}
@@ -530,7 +528,7 @@ size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu)
*
* Returns the number of pages that have content in the ring buffer.
*/
size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu)
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
{
size_t read;
size_t cnt;
@@ -573,7 +571,7 @@ static void rb_wake_up_waiters(struct irq_work *work)
* as data is added to any of the @buffer's cpu buffers. Otherwise
* it will wait for data to be added to a specific cpu buffer.
*/
int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
{
struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
DEFINE_WAIT(wait);
@@ -684,7 +682,7 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
* Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
* zero otherwise.
*/
__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
struct file *filp, poll_table *poll_table)
{
struct ring_buffer_per_cpu *cpu_buffer;
@@ -742,13 +740,13 @@ __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0
static inline u64 rb_time_stamp(struct ring_buffer *buffer)
static inline u64 rb_time_stamp(struct trace_buffer *buffer)
{
/* shift to debug/test normalization and TIME_EXTENTS */
return buffer->clock() << DEBUG_SHIFT;
}
u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu)
{
u64 time;
@@ -760,7 +758,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
int cpu, u64 *ts)
{
/* Just stupid testing the normalize function and deltas */
@@ -1283,7 +1281,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
}
static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct buffer_page *bpage;
@@ -1368,16 +1366,17 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
* __ring_buffer_alloc - allocate a new ring_buffer
* @size: the size in bytes per cpu that is needed.
* @flags: attributes to set for the ring buffer.
* @key: ring buffer reader_lock_key.
*
* Currently the only flag that is available is the RB_FL_OVERWRITE
* flag. This flag means that the buffer will overwrite old data
* when the buffer wraps. If this flag is not set, the buffer will
* drop data when the tail hits the head.
*/
struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
struct lock_class_key *key)
{
struct ring_buffer *buffer;
struct trace_buffer *buffer;
long nr_pages;
int bsize;
int cpu;
@@ -1447,7 +1446,7 @@ EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
* @buffer: the buffer to free.
*/
void
ring_buffer_free(struct ring_buffer *buffer)
ring_buffer_free(struct trace_buffer *buffer)
{
int cpu;
@@ -1463,18 +1462,18 @@ ring_buffer_free(struct ring_buffer *buffer)
}
EXPORT_SYMBOL_GPL(ring_buffer_free);
void ring_buffer_set_clock(struct ring_buffer *buffer,
void ring_buffer_set_clock(struct trace_buffer *buffer,
u64 (*clock)(void))
{
buffer->clock = clock;
}
void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs)
void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
{
buffer->time_stamp_abs = abs;
}
bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer)
bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
{
return buffer->time_stamp_abs;
}
@@ -1712,7 +1711,7 @@ static void update_pages_handler(struct work_struct *work)
*
* Returns 0 on success and < 0 on failure.
*/
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
int cpu_id)
{
struct ring_buffer_per_cpu *cpu_buffer;
@@ -1891,7 +1890,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);
void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val)
{
mutex_lock(&buffer->mutex);
if (val)
@@ -2206,7 +2205,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
{
struct buffer_page *tail_page = info->tail_page;
struct buffer_page *commit_page = cpu_buffer->commit_page;
struct ring_buffer *buffer = cpu_buffer->buffer;
struct trace_buffer *buffer = cpu_buffer->buffer;
struct buffer_page *next_page;
int ret;
@@ -2330,11 +2329,11 @@ static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
/**
* rb_update_event - update event type and data
* @cpu_buffer: The per cpu buffer of the @event
* @event: the event to update
* @type: the type of event
* @length: the size of the event field in the ring buffer
* @info: The info to update the @event with (contains length and delta)
*
* Update the type and data fields of the event. The length
* Update the type and data fields of the @event. The length
* is the actual size that is written to the ring buffer,
* and with this, we can determine what to place into the
* data field.
@@ -2609,7 +2608,7 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
}
static __always_inline void
rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
{
size_t nr_pages;
size_t dirty;
@@ -2733,7 +2732,7 @@ trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
* Call this function before calling another ring_buffer_lock_reserve() and
* call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
*/
void ring_buffer_nest_start(struct ring_buffer *buffer)
void ring_buffer_nest_start(struct trace_buffer *buffer)
{
struct ring_buffer_per_cpu *cpu_buffer;
int cpu;
@@ -2753,7 +2752,7 @@ void ring_buffer_nest_start(struct ring_buffer *buffer)
* Must be called after ring_buffer_nest_start() and after the
* ring_buffer_unlock_commit().
*/
void ring_buffer_nest_end(struct ring_buffer *buffer)
void ring_buffer_nest_end(struct trace_buffer *buffer)
{
struct ring_buffer_per_cpu *cpu_buffer;
int cpu;
@@ -2775,7 +2774,7 @@ void ring_buffer_nest_end(struct ring_buffer *buffer)
*
* Must be paired with ring_buffer_lock_reserve.
*/
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
int ring_buffer_unlock_commit(struct trace_buffer *buffer,
struct ring_buffer_event *event)
{
struct ring_buffer_per_cpu *cpu_buffer;
@@ -2868,7 +2867,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
}
static __always_inline struct ring_buffer_event *
rb_reserve_next_event(struct ring_buffer *buffer,
rb_reserve_next_event(struct trace_buffer *buffer,
struct ring_buffer_per_cpu *cpu_buffer,
unsigned long length)
{
@@ -2961,7 +2960,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
* If NULL is returned, then nothing has been allocated or locked.
*/
struct ring_buffer_event *
ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
@@ -3062,7 +3061,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
* If this function is called, do not call ring_buffer_unlock_commit on
* the event.
*/
void ring_buffer_discard_commit(struct ring_buffer *buffer,
void ring_buffer_discard_commit(struct trace_buffer *buffer,
struct ring_buffer_event *event)
{
struct ring_buffer_per_cpu *cpu_buffer;
@@ -3113,7 +3112,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
* Note, like ring_buffer_lock_reserve, the length is the length of the data
* and not the length of the event which would hold the header.
*/
int ring_buffer_write(struct ring_buffer *buffer,
int ring_buffer_write(struct trace_buffer *buffer,
unsigned long length,
void *data)
{
@@ -3193,7 +3192,7 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
*
* The caller should call synchronize_rcu() after this.
*/
void ring_buffer_record_disable(struct ring_buffer *buffer)
void ring_buffer_record_disable(struct trace_buffer *buffer)
{
atomic_inc(&buffer->record_disabled);
}
@@ -3206,7 +3205,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
* Note, multiple disables will need the same number of enables
* to truly enable the writing (much like preempt_disable).
*/
void ring_buffer_record_enable(struct ring_buffer *buffer)
void ring_buffer_record_enable(struct trace_buffer *buffer)
{
atomic_dec(&buffer->record_disabled);
}
@@ -3223,7 +3222,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
* it works like an on/off switch, where as the disable() version
* must be paired with a enable().
*/
void ring_buffer_record_off(struct ring_buffer *buffer)
void ring_buffer_record_off(struct trace_buffer *buffer)
{
unsigned int rd;
unsigned int new_rd;
@@ -3246,7 +3245,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_off);
* it works like an on/off switch, where as the enable() version
* must be paired with a disable().
*/
void ring_buffer_record_on(struct ring_buffer *buffer)
void ring_buffer_record_on(struct trace_buffer *buffer)
{
unsigned int rd;
unsigned int new_rd;
@@ -3264,7 +3263,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_on);
*
* Returns true if the ring buffer is in a state that it accepts writes.
*/
bool ring_buffer_record_is_on(struct ring_buffer *buffer)
bool ring_buffer_record_is_on(struct trace_buffer *buffer)
{
return !atomic_read(&buffer->record_disabled);
}
@@ -3280,7 +3279,7 @@ bool ring_buffer_record_is_on(struct ring_buffer *buffer)
* ring_buffer_record_disable(), as that is a temporary disabling of
* the ring buffer.
*/
bool ring_buffer_record_is_set_on(struct ring_buffer *buffer)
bool ring_buffer_record_is_set_on(struct trace_buffer *buffer)
{
return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
}
@@ -3295,7 +3294,7 @@ bool ring_buffer_record_is_set_on(struct ring_buffer *buffer)
*
* The caller should call synchronize_rcu() after this.
*/
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
@@ -3315,7 +3314,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
* Note, multiple disables will need the same number of enables
* to truly enable the writing (much like preempt_disable).
*/
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
@@ -3345,7 +3344,7 @@ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
* @buffer: The ring buffer
* @cpu: The per CPU buffer to read from.
*/
u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
{
unsigned long flags;
struct ring_buffer_per_cpu *cpu_buffer;
@@ -3378,7 +3377,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
* @buffer: The ring buffer
* @cpu: The per CPU buffer to read from.
*/
unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long ret;
@@ -3398,7 +3397,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
* @buffer: The ring buffer
* @cpu: The per CPU buffer to get the entries from.
*/
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
@@ -3417,7 +3416,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
* @buffer: The ring buffer
* @cpu: The per CPU buffer to get the number of overruns from
*/
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long ret;
@@ -3440,7 +3439,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
* @cpu: The per CPU buffer to get the number of overruns from
*/
unsigned long
ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long ret;
@@ -3462,7 +3461,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
* @cpu: The per CPU buffer to get the number of overruns from
*/
unsigned long
ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long ret;
@@ -3483,7 +3482,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
* @cpu: The per CPU buffer to get the number of events read
*/
unsigned long
ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
@@ -3502,7 +3501,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
* Returns the total number of entries in the ring buffer
* (all CPU entries)
*/
unsigned long ring_buffer_entries(struct ring_buffer *buffer)
unsigned long ring_buffer_entries(struct trace_buffer *buffer)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long entries = 0;
@@ -3525,7 +3524,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_entries);
* Returns the total number of overruns in the ring buffer
* (all CPU entries)
*/
unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
unsigned long ring_buffer_overruns(struct trace_buffer *buffer)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long overruns = 0;
@@ -3949,7 +3948,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_peek);
static struct ring_buffer_event *
rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
struct ring_buffer *buffer;
struct trace_buffer *buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
int nr_loops = 0;
@@ -4077,7 +4076,7 @@ rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
* not consume the data.
*/
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
@@ -4141,7 +4140,7 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
* and eventually empty the ring buffer if the producer is slower.
*/
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events)
{
struct ring_buffer_per_cpu *cpu_buffer;
@@ -4201,7 +4200,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
* This overall must be paired with ring_buffer_read_finish.
*/
struct ring_buffer_iter *
ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags)
ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_iter *iter;
@@ -4331,8 +4330,9 @@ EXPORT_SYMBOL_GPL(ring_buffer_read);
/**
* ring_buffer_size - return the size of the ring buffer (in bytes)
* @buffer: The ring buffer.
* @cpu: The CPU to get ring buffer size from.
*/
unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
{
/*
* Earlier, this method returned
@@ -4398,7 +4398,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
* @buffer: The ring buffer to reset a per cpu buffer of
* @cpu: The CPU buffer to be reset
*/
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
unsigned long flags;
@@ -4435,7 +4435,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
* ring_buffer_reset - reset a ring buffer
* @buffer: The ring buffer to reset all cpu buffers
*/
void ring_buffer_reset(struct ring_buffer *buffer)
void ring_buffer_reset(struct trace_buffer *buffer)
{
int cpu;
@@ -4448,7 +4448,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset);
* ring_buffer_empty - is the ring buffer empty?
* @buffer: The ring buffer to test
*/
bool ring_buffer_empty(struct ring_buffer *buffer)
bool ring_buffer_empty(struct trace_buffer *buffer)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
@@ -4478,7 +4478,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty);
* @buffer: The ring buffer
* @cpu: The CPU buffer to test
*/
bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
@@ -4504,14 +4504,15 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
* ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
* @buffer_a: One buffer to swap with
* @buffer_b: The other buffer to swap with
* @cpu: the CPU of the buffers to swap
*
* This function is useful for tracers that want to take a "snapshot"
* of a CPU buffer and have another backup buffer lying around.
* It is expected that the tracer handles the cpu buffer not being
* used at the moment.
*/
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
struct ring_buffer *buffer_b, int cpu)
int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
struct trace_buffer *buffer_b, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer_a;
struct ring_buffer_per_cpu *cpu_buffer_b;
@@ -4590,7 +4591,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
* Returns:
* The page allocated, or ERR_PTR
*/
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct buffer_data_page *bpage = NULL;
@@ -4637,7 +4638,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
*
* Free a page allocated from ring_buffer_alloc_read_page.
*/
void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
struct buffer_data_page *bpage = data;
@@ -4697,7 +4698,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
* >=0 if data has been transferred, returns the offset of consumed data.
* <0 if no data has been transferred.
*/
int ring_buffer_read_page(struct ring_buffer *buffer,
int ring_buffer_read_page(struct trace_buffer *buffer,
void **data_page, size_t len, int cpu, int full)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
@@ -4868,12 +4869,12 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_page);
*/
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
{
struct ring_buffer *buffer;
struct trace_buffer *buffer;
long nr_pages_same;
int cpu_i;
unsigned long nr_pages;
buffer = container_of(node, struct ring_buffer, node);
buffer = container_of(node, struct trace_buffer, node);
if (cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
@@ -4923,7 +4924,7 @@ int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
static struct task_struct *rb_threads[NR_CPUS] __initdata;
struct rb_test_data {
struct ring_buffer *buffer;
struct trace_buffer *buffer;
unsigned long events;
unsigned long bytes_written;
unsigned long bytes_alloc;
@@ -5065,7 +5066,7 @@ static __init int rb_hammer_test(void *arg)
static __init int test_ringbuffer(void)
{
struct task_struct *rb_hammer;
struct ring_buffer *buffer;
struct trace_buffer *buffer;
int cpu;
int ret = 0;

View File

@@ -29,7 +29,7 @@ static int reader_finish;
static DECLARE_COMPLETION(read_start);
static DECLARE_COMPLETION(read_done);
static struct ring_buffer *buffer;
static struct trace_buffer *buffer;
static struct task_struct *producer;
static struct task_struct *consumer;
static unsigned long read;

View File

@@ -0,0 +1,523 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Test module for in-kernel synthetic event creation and generation.
*
* Copyright (C) 2019 Tom Zanussi <zanussi@kernel.org>
*/
#include <linux/module.h>
#include <linux/trace_events.h>
/*
* This module is a simple test of basic functionality for in-kernel
* synthetic event creation and generation, the first and second tests
* using synth_event_gen_cmd_start() and synth_event_add_field(), the
* third uses synth_event_create() to do it all at once with a static
* field array.
*
* Following that are a few examples using the created events to test
* various ways of tracing a synthetic event.
*
* To test, select CONFIG_SYNTH_EVENT_GEN_TEST and build the module.
* Then:
*
* # insmod kernel/trace/synth_event_gen_test.ko
* # cat /sys/kernel/debug/tracing/trace
*
* You should see several events in the trace buffer -
* "create_synth_test", "empty_synth_test", and several instances of
* "gen_synth_test".
*
* To remove the events, remove the module:
*
* # rmmod synth_event_gen_test
*
*/
static struct trace_event_file *create_synth_test;
static struct trace_event_file *empty_synth_test;
static struct trace_event_file *gen_synth_test;
/*
* Test to make sure we can create a synthetic event, then add more
* fields.
*/
static int __init test_gen_synth_cmd(void)
{
struct dynevent_cmd cmd;
u64 vals[7];
char *buf;
int ret;
/* Create a buffer to hold the generated command */
buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/* Before generating the command, initialize the cmd object */
synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
/*
* Create the empty gen_synth_test synthetic event with the
* first 4 fields.
*/
ret = synth_event_gen_cmd_start(&cmd, "gen_synth_test", THIS_MODULE,
"pid_t", "next_pid_field",
"char[16]", "next_comm_field",
"u64", "ts_ns",
"u64", "ts_ms");
if (ret)
goto free;
/* Use synth_event_add_field to add the rest of the fields */
ret = synth_event_add_field(&cmd, "unsigned int", "cpu");
if (ret)
goto free;
ret = synth_event_add_field(&cmd, "char[64]", "my_string_field");
if (ret)
goto free;
ret = synth_event_add_field(&cmd, "int", "my_int_field");
if (ret)
goto free;
ret = synth_event_gen_cmd_end(&cmd);
if (ret)
goto free;
/*
* Now get the gen_synth_test event file. We need to prevent
* the instance and event from disappearing from underneath
* us, which trace_get_event_file() does (though in this case
* we're using the top-level instance which never goes away).
*/
gen_synth_test = trace_get_event_file(NULL, "synthetic",
"gen_synth_test");
if (IS_ERR(gen_synth_test)) {
ret = PTR_ERR(gen_synth_test);
goto delete;
}
/* Enable the event or you won't see anything */
ret = trace_array_set_clr_event(gen_synth_test->tr,
"synthetic", "gen_synth_test", true);
if (ret) {
trace_put_event_file(gen_synth_test);
goto delete;
}
/* Create some bogus values just for testing */
vals[0] = 777; /* next_pid_field */
vals[1] = (u64)"hula hoops"; /* next_comm_field */
vals[2] = 1000000; /* ts_ns */
vals[3] = 1000; /* ts_ms */
vals[4] = smp_processor_id(); /* cpu */
vals[5] = (u64)"thneed"; /* my_string_field */
vals[6] = 598; /* my_int_field */
/* Now generate a gen_synth_test event */
ret = synth_event_trace_array(gen_synth_test, vals, ARRAY_SIZE(vals));
out:
return ret;
delete:
/* We got an error after creating the event, delete it */
synth_event_delete("gen_synth_test");
free:
kfree(buf);
goto out;
}
/*
* Test to make sure we can create an initially empty synthetic event,
* then add all the fields.
*/
static int __init test_empty_synth_event(void)
{
struct dynevent_cmd cmd;
u64 vals[7];
char *buf;
int ret;
/* Create a buffer to hold the generated command */
buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/* Before generating the command, initialize the cmd object */
synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
/*
* Create the empty_synth_test synthetic event with no fields.
*/
ret = synth_event_gen_cmd_start(&cmd, "empty_synth_test", THIS_MODULE);
if (ret)
goto free;
/* Use synth_event_add_field to add all of the fields */
ret = synth_event_add_field(&cmd, "pid_t", "next_pid_field");
if (ret)
goto free;
ret = synth_event_add_field(&cmd, "char[16]", "next_comm_field");
if (ret)
goto free;
ret = synth_event_add_field(&cmd, "u64", "ts_ns");
if (ret)
goto free;
ret = synth_event_add_field(&cmd, "u64", "ts_ms");
if (ret)
goto free;
ret = synth_event_add_field(&cmd, "unsigned int", "cpu");
if (ret)
goto free;
ret = synth_event_add_field(&cmd, "char[64]", "my_string_field");
if (ret)
goto free;
ret = synth_event_add_field(&cmd, "int", "my_int_field");
if (ret)
goto free;
/* All fields have been added, close and register the synth event */
ret = synth_event_gen_cmd_end(&cmd);
if (ret)
goto free;
/*
* Now get the empty_synth_test event file. We need to
* prevent the instance and event from disappearing from
* underneath us, which trace_get_event_file() does (though in
* this case we're using the top-level instance which never
* goes away).
*/
empty_synth_test = trace_get_event_file(NULL, "synthetic",
"empty_synth_test");
if (IS_ERR(empty_synth_test)) {
ret = PTR_ERR(empty_synth_test);
goto delete;
}
/* Enable the event or you won't see anything */
ret = trace_array_set_clr_event(empty_synth_test->tr,
"synthetic", "empty_synth_test", true);
if (ret) {
trace_put_event_file(empty_synth_test);
goto delete;
}
/* Create some bogus values just for testing */
vals[0] = 777; /* next_pid_field */
vals[1] = (u64)"tiddlywinks"; /* next_comm_field */
vals[2] = 1000000; /* ts_ns */
vals[3] = 1000; /* ts_ms */
vals[4] = smp_processor_id(); /* cpu */
vals[5] = (u64)"thneed_2.0"; /* my_string_field */
vals[6] = 399; /* my_int_field */
/* Now trace an empty_synth_test event */
ret = synth_event_trace_array(empty_synth_test, vals, ARRAY_SIZE(vals));
out:
return ret;
delete:
/* We got an error after creating the event, delete it */
synth_event_delete("empty_synth_test");
free:
kfree(buf);
goto out;
}
static struct synth_field_desc create_synth_test_fields[] = {
{ .type = "pid_t", .name = "next_pid_field" },
{ .type = "char[16]", .name = "next_comm_field" },
{ .type = "u64", .name = "ts_ns" },
{ .type = "u64", .name = "ts_ms" },
{ .type = "unsigned int", .name = "cpu" },
{ .type = "char[64]", .name = "my_string_field" },
{ .type = "int", .name = "my_int_field" },
};
/*
* Test synthetic event creation all at once from array of field
* descriptors.
*/
static int __init test_create_synth_event(void)
{
u64 vals[7];
int ret;
/* Create the create_synth_test event with the fields above */
ret = synth_event_create("create_synth_test",
create_synth_test_fields,
ARRAY_SIZE(create_synth_test_fields),
THIS_MODULE);
if (ret)
goto out;
/*
* Now get the create_synth_test event file. We need to
* prevent the instance and event from disappearing from
* underneath us, which trace_get_event_file() does (though in
* this case we're using the top-level instance which never
* goes away).
*/
create_synth_test = trace_get_event_file(NULL, "synthetic",
"create_synth_test");
if (IS_ERR(create_synth_test)) {
ret = PTR_ERR(create_synth_test);
goto delete;
}
/* Enable the event or you won't see anything */
ret = trace_array_set_clr_event(create_synth_test->tr,
"synthetic", "create_synth_test", true);
if (ret) {
trace_put_event_file(create_synth_test);
goto delete;
}
/* Create some bogus values just for testing */
vals[0] = 777; /* next_pid_field */
vals[1] = (u64)"tiddlywinks"; /* next_comm_field */
vals[2] = 1000000; /* ts_ns */
vals[3] = 1000; /* ts_ms */
vals[4] = smp_processor_id(); /* cpu */
vals[5] = (u64)"thneed"; /* my_string_field */
vals[6] = 398; /* my_int_field */
/* Now generate a create_synth_test event */
ret = synth_event_trace_array(create_synth_test, vals, ARRAY_SIZE(vals));
out:
return ret;
delete:
/* We got an error after creating the event, delete it */
ret = synth_event_delete("create_synth_test");
goto out;
}
/*
* Test tracing a synthetic event by reserving trace buffer space,
* then filling in fields one after another.
*/
static int __init test_add_next_synth_val(void)
{
struct synth_event_trace_state trace_state;
int ret;
/* Start by reserving space in the trace buffer */
ret = synth_event_trace_start(gen_synth_test, &trace_state);
if (ret)
return ret;
/* Write some bogus values into the trace buffer, one after another */
/* next_pid_field */
ret = synth_event_add_next_val(777, &trace_state);
if (ret)
goto out;
/* next_comm_field */
ret = synth_event_add_next_val((u64)"slinky", &trace_state);
if (ret)
goto out;
/* ts_ns */
ret = synth_event_add_next_val(1000000, &trace_state);
if (ret)
goto out;
/* ts_ms */
ret = synth_event_add_next_val(1000, &trace_state);
if (ret)
goto out;
/* cpu */
ret = synth_event_add_next_val(smp_processor_id(), &trace_state);
if (ret)
goto out;
/* my_string_field */
ret = synth_event_add_next_val((u64)"thneed_2.01", &trace_state);
if (ret)
goto out;
/* my_int_field */
ret = synth_event_add_next_val(395, &trace_state);
out:
/* Finally, commit the event */
ret = synth_event_trace_end(&trace_state);
return ret;
}
/*
* Test tracing a synthetic event by reserving trace buffer space,
* then filling in fields using field names, which can be done in any
* order.
*/
static int __init test_add_synth_val(void)
{
struct synth_event_trace_state trace_state;
int ret;
/* Start by reserving space in the trace buffer */
ret = synth_event_trace_start(gen_synth_test, &trace_state);
if (ret)
return ret;
/* Write some bogus values into the trace buffer, using field names */
ret = synth_event_add_val("ts_ns", 1000000, &trace_state);
if (ret)
goto out;
ret = synth_event_add_val("ts_ms", 1000, &trace_state);
if (ret)
goto out;
ret = synth_event_add_val("cpu", smp_processor_id(), &trace_state);
if (ret)
goto out;
ret = synth_event_add_val("next_pid_field", 777, &trace_state);
if (ret)
goto out;
ret = synth_event_add_val("next_comm_field", (u64)"silly putty",
&trace_state);
if (ret)
goto out;
ret = synth_event_add_val("my_string_field", (u64)"thneed_9",
&trace_state);
if (ret)
goto out;
ret = synth_event_add_val("my_int_field", 3999, &trace_state);
out:
/* Finally, commit the event */
ret = synth_event_trace_end(&trace_state);
return ret;
}
/*
* Test tracing a synthetic event all at once from array of values.
*/
static int __init test_trace_synth_event(void)
{
int ret;
/* Trace some bogus values just for testing */
ret = synth_event_trace(create_synth_test, 7, /* number of values */
444, /* next_pid_field */
(u64)"clackers", /* next_comm_field */
1000000, /* ts_ns */
1000, /* ts_ms */
smp_processor_id(), /* cpu */
(u64)"Thneed", /* my_string_field */
999); /* my_int_field */
return ret;
}
static int __init synth_event_gen_test_init(void)
{
int ret;
ret = test_gen_synth_cmd();
if (ret)
return ret;
ret = test_empty_synth_event();
if (ret) {
WARN_ON(trace_array_set_clr_event(gen_synth_test->tr,
"synthetic",
"gen_synth_test", false));
trace_put_event_file(gen_synth_test);
WARN_ON(synth_event_delete("gen_synth_test"));
goto out;
}
ret = test_create_synth_event();
if (ret) {
WARN_ON(trace_array_set_clr_event(gen_synth_test->tr,
"synthetic",
"gen_synth_test", false));
trace_put_event_file(gen_synth_test);
WARN_ON(synth_event_delete("gen_synth_test"));
WARN_ON(trace_array_set_clr_event(empty_synth_test->tr,
"synthetic",
"empty_synth_test", false));
trace_put_event_file(empty_synth_test);
WARN_ON(synth_event_delete("empty_synth_test"));
goto out;
}
ret = test_add_next_synth_val();
WARN_ON(ret);
ret = test_add_synth_val();
WARN_ON(ret);
ret = test_trace_synth_event();
WARN_ON(ret);
out:
return ret;
}
static void __exit synth_event_gen_test_exit(void)
{
/* Disable the event or you can't remove it */
WARN_ON(trace_array_set_clr_event(gen_synth_test->tr,
"synthetic",
"gen_synth_test", false));
/* Now give the file and instance back */
trace_put_event_file(gen_synth_test);
/* Now unregister and free the synthetic event */
WARN_ON(synth_event_delete("gen_synth_test"));
/* Disable the event or you can't remove it */
WARN_ON(trace_array_set_clr_event(empty_synth_test->tr,
"synthetic",
"empty_synth_test", false));
/* Now give the file and instance back */
trace_put_event_file(empty_synth_test);
/* Now unregister and free the synthetic event */
WARN_ON(synth_event_delete("empty_synth_test"));
/* Disable the event or you can't remove it */
WARN_ON(trace_array_set_clr_event(create_synth_test->tr,
"synthetic",
"create_synth_test", false));
/* Now give the file and instance back */
trace_put_event_file(create_synth_test);
/* Now unregister and free the synthetic event */
WARN_ON(synth_event_delete("create_synth_test"));
}
module_init(synth_event_gen_test_init)
module_exit(synth_event_gen_test_exit)
MODULE_AUTHOR("Tom Zanussi");
MODULE_DESCRIPTION("synthetic event generation test");
MODULE_LICENSE("GPL v2");

File diff suppressed because it is too large

View File

@@ -93,6 +93,18 @@ enum trace_type {
#include "trace_entries.h"
/* Use this for memory failure errors */
#define MEM_FAIL(condition, fmt, ...) ({ \
static bool __section(.data.once) __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once && !__warned)) { \
__warned = true; \
pr_err("ERROR: " fmt, ##__VA_ARGS__); \
} \
unlikely(__ret_warn_once); \
})
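/*
 * A minimal usage sketch for MEM_FAIL (illustrative only, not part of
 * this patch). The macro prints the error once and evaluates to the
 * (unlikely) condition, so it can be tested directly:
 *
 *	tr->array_buffer.buffer = ring_buffer_alloc(size, rb_flags);
 *	if (MEM_FAIL(!tr->array_buffer.buffer,
 *		     "Failed to allocate ring buffer\n"))
 *		return -ENOMEM;
 */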
/*
* syscalls are special, and need special handling, this is why
* they are not included in trace_entries.h
@@ -175,9 +187,9 @@ struct trace_array_cpu {
struct tracer;
struct trace_option_dentry;
struct trace_buffer {
struct array_buffer {
struct trace_array *tr;
struct ring_buffer *buffer;
struct trace_buffer *buffer;
struct trace_array_cpu __percpu *data;
u64 time_start;
int cpu;
@@ -248,7 +260,7 @@ struct cond_snapshot {
struct trace_array {
struct list_head list;
char *name;
struct trace_buffer trace_buffer;
struct array_buffer array_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
/*
* The max_buffer is used to snapshot the trace when a maximum
@@ -256,12 +268,12 @@ struct trace_array {
* Some tracers will use this to store a maximum trace while
* it continues examining live traces.
*
* The buffers for the max_buffer are set up the same as the trace_buffer
* The buffers for the max_buffer are set up the same as the array_buffer
* When a snapshot is taken, the buffer of the max_buffer is swapped
* with the buffer of the trace_buffer and the buffers are reset for
* the trace_buffer so the tracing can continue.
* with the buffer of the array_buffer and the buffers are reset for
* the array_buffer so the tracing can continue.
*/
struct trace_buffer max_buffer;
struct array_buffer max_buffer;
bool allocated_snapshot;
#endif
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
@@ -345,6 +357,8 @@ extern struct mutex trace_types_lock;
extern int trace_array_get(struct trace_array *tr);
extern int tracing_check_open_get_tr(struct trace_array *tr);
extern struct trace_array *trace_array_find(const char *instance);
extern struct trace_array *trace_array_find_get(const char *instance);
extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
@@ -684,7 +698,7 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu)
int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_online_cpus(struct array_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
@@ -704,7 +718,7 @@ struct dentry *tracing_init_dentry(void);
struct ring_buffer_event;
struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
trace_buffer_lock_reserve(struct trace_buffer *buffer,
int type,
unsigned long len,
unsigned long flags,
@@ -716,7 +730,7 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
int *ent_cpu, u64 *ent_ts);
void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
struct ring_buffer_event *event);
int trace_empty(struct trace_iterator *iter);
@@ -872,7 +886,7 @@ trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
unsigned long ip, const char *fmt, va_list args);
int trace_array_printk_buf(struct ring_buffer *buffer,
int trace_array_printk_buf(struct trace_buffer *buffer,
unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);
@@ -949,22 +963,31 @@ extern void __trace_graph_return(struct trace_array *tr,
unsigned long flags, int pc);
#ifdef CONFIG_DYNAMIC_FTRACE
extern struct ftrace_hash *ftrace_graph_hash;
extern struct ftrace_hash *ftrace_graph_notrace_hash;
extern struct ftrace_hash __rcu *ftrace_graph_hash;
extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;
static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{
unsigned long addr = trace->func;
int ret = 0;
struct ftrace_hash *hash;
preempt_disable_notrace();
if (ftrace_hash_empty(ftrace_graph_hash)) {
/*
* Have to open code "rcu_dereference_sched()" because the
* function graph tracer can be called when RCU is not
* "watching".
* Protected with schedule_on_each_cpu(ftrace_sync)
*/
hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());
if (ftrace_hash_empty(hash)) {
ret = 1;
goto out;
}
if (ftrace_lookup_ip(ftrace_graph_hash, addr)) {
if (ftrace_lookup_ip(hash, addr)) {
/*
* This needs to be cleared on the return functions
@@ -1000,10 +1023,20 @@ static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
int ret = 0;
struct ftrace_hash *notrace_hash;
preempt_disable_notrace();
if (ftrace_lookup_ip(ftrace_graph_notrace_hash, addr))
/*
* Have to open code "rcu_dereference_sched()" because the
* function graph tracer can be called when RCU is not
* "watching".
* Protected with schedule_on_each_cpu(ftrace_sync)
*/
notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
!preemptible());
if (ftrace_lookup_ip(notrace_hash, addr))
ret = 1;
preempt_enable_notrace();
@@ -1056,7 +1089,7 @@ struct ftrace_func_command {
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct trace_array *tr)
{
return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
return !this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
@@ -1144,6 +1177,11 @@ int unregister_ftrace_command(struct ftrace_func_command *cmd);
void ftrace_create_filter_files(struct ftrace_ops *ops,
struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops);
extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset);
extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset);
#else
struct ftrace_func_command;
@@ -1366,17 +1404,17 @@ struct trace_subsystem_dir {
};
extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
struct ring_buffer *buffer,
struct trace_buffer *buffer,
struct ring_buffer_event *event);
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
struct ring_buffer *buffer,
struct trace_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc,
struct pt_regs *regs);
static inline void trace_buffer_unlock_commit(struct trace_array *tr,
struct ring_buffer *buffer,
struct trace_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc)
{
@@ -1389,7 +1427,7 @@ void trace_buffered_event_disable(void);
void trace_buffered_event_enable(void);
static inline void
__trace_event_discard_commit(struct ring_buffer *buffer,
__trace_event_discard_commit(struct trace_buffer *buffer,
struct ring_buffer_event *event)
{
if (this_cpu_read(trace_buffered_event) == event) {
@@ -1415,7 +1453,7 @@ __trace_event_discard_commit(struct ring_buffer *buffer,
*/
static inline bool
__event_trigger_test_discard(struct trace_event_file *file,
struct ring_buffer *buffer,
struct trace_buffer *buffer,
struct ring_buffer_event *event,
void *entry,
enum event_trigger_type *tt)
@@ -1450,7 +1488,7 @@ __event_trigger_test_discard(struct trace_event_file *file,
*/
static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
struct ring_buffer *buffer,
struct trace_buffer *buffer,
struct ring_buffer_event *event,
void *entry, unsigned long irq_flags, int pc)
{
@@ -1481,7 +1519,7 @@ event_trigger_unlock_commit(struct trace_event_file *file,
*/
static inline void
event_trigger_unlock_commit_regs(struct trace_event_file *file,
struct ring_buffer *buffer,
struct trace_buffer *buffer,
struct ring_buffer_event *event,
void *entry, unsigned long irq_flags, int pc,
struct pt_regs *regs)
@@ -1892,6 +1930,15 @@ void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
/* Used from boot time tracer */
extern int trace_set_options(struct trace_array *tr, char *option);
extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
unsigned long size, int cpu_id);
extern int tracing_set_cpumask(struct trace_array *tr,
cpumask_var_t tracing_cpumask_new);
#define MAX_EVENT_NAME_LEN 64
extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
@@ -1949,6 +1996,9 @@ static inline const char *get_syscall_name(int syscall)
#ifdef CONFIG_EVENT_TRACING
void trace_event_init(void);
void trace_event_eval_update(struct trace_eval_map **map, int len);
/* Used from boot time tracer */
extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
extern int trigger_process_regex(struct trace_event_file *file, char *buff);
#else
static inline void __init trace_event_init(void) { }
static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }

kernel/trace/trace_boot.c (new file, 334 lines)
View File

@@ -0,0 +1,334 @@
// SPDX-License-Identifier: GPL-2.0
/*
* trace_boot.c
* Tracing kernel boot-time
*/
#define pr_fmt(fmt) "trace_boot: " fmt
#include <linux/bootconfig.h>
#include <linux/cpumask.h>
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/trace.h>
#include <linux/trace_events.h>
#include "trace.h"
#define MAX_BUF_LEN 256
static void __init
trace_boot_set_instance_options(struct trace_array *tr, struct xbc_node *node)
{
struct xbc_node *anode;
const char *p;
char buf[MAX_BUF_LEN];
unsigned long v = 0;
/* Common ftrace options */
xbc_node_for_each_array_value(node, "options", anode, p) {
if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) {
pr_err("String is too long: %s\n", p);
continue;
}
if (trace_set_options(tr, buf) < 0)
pr_err("Failed to set option: %s\n", buf);
}
p = xbc_node_find_value(node, "trace_clock", NULL);
if (p && *p != '\0') {
if (tracing_set_clock(tr, p) < 0)
pr_err("Failed to set trace clock: %s\n", p);
}
p = xbc_node_find_value(node, "buffer_size", NULL);
if (p && *p != '\0') {
v = memparse(p, NULL);
if (v < PAGE_SIZE)
pr_err("Buffer size is too small: %s\n", p);
if (tracing_resize_ring_buffer(tr, v, RING_BUFFER_ALL_CPUS) < 0)
pr_err("Failed to resize trace buffer to %s\n", p);
}
p = xbc_node_find_value(node, "cpumask", NULL);
if (p && *p != '\0') {
cpumask_var_t new_mask;
if (alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
if (cpumask_parse(p, new_mask) < 0 ||
tracing_set_cpumask(tr, new_mask) < 0)
pr_err("Failed to set new CPU mask %s\n", p);
free_cpumask_var(new_mask);
}
}
}
#ifdef CONFIG_EVENT_TRACING
static void __init
trace_boot_enable_events(struct trace_array *tr, struct xbc_node *node)
{
struct xbc_node *anode;
char buf[MAX_BUF_LEN];
const char *p;
xbc_node_for_each_array_value(node, "events", anode, p) {
if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) {
pr_err("String is too long: %s\n", p);
continue;
}
if (ftrace_set_clr_event(tr, buf, 1) < 0)
pr_err("Failed to enable event: %s\n", p);
}
}
#ifdef CONFIG_KPROBE_EVENTS
static int __init
trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
{
struct dynevent_cmd cmd;
struct xbc_node *anode;
char buf[MAX_BUF_LEN];
const char *val;
int ret;
kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);
ret = kprobe_event_gen_cmd_start(&cmd, event, NULL);
if (ret)
return ret;
xbc_node_for_each_array_value(node, "probes", anode, val) {
ret = kprobe_event_add_field(&cmd, val);
if (ret)
return ret;
}
ret = kprobe_event_gen_cmd_end(&cmd);
if (ret)
pr_err("Failed to add probe: %s\n", buf);
return ret;
}
#else
static inline int __init
trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
{
pr_err("Kprobe event is not supported.\n");
return -ENOTSUPP;
}
#endif
#ifdef CONFIG_HIST_TRIGGERS
static int __init
trace_boot_add_synth_event(struct xbc_node *node, const char *event)
{
struct dynevent_cmd cmd;
struct xbc_node *anode;
char buf[MAX_BUF_LEN];
const char *p;
int ret;
synth_event_cmd_init(&cmd, buf, MAX_BUF_LEN);
ret = synth_event_gen_cmd_start(&cmd, event, NULL);
if (ret)
return ret;
xbc_node_for_each_array_value(node, "fields", anode, p) {
ret = synth_event_add_field_str(&cmd, p);
if (ret)
return ret;
}
ret = synth_event_gen_cmd_end(&cmd);
if (ret < 0)
pr_err("Failed to add synthetic event: %s\n", buf);
return ret;
}
#else
static inline int __init
trace_boot_add_synth_event(struct xbc_node *node, const char *event)
{
pr_err("Synthetic event is not supported.\n");
return -ENOTSUPP;
}
#endif
static void __init
trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode,
struct xbc_node *enode)
{
struct trace_event_file *file;
struct xbc_node *anode;
char buf[MAX_BUF_LEN];
const char *p, *group, *event;
group = xbc_node_get_data(gnode);
event = xbc_node_get_data(enode);
if (!strcmp(group, "kprobes"))
if (trace_boot_add_kprobe_event(enode, event) < 0)
return;
if (!strcmp(group, "synthetic"))
if (trace_boot_add_synth_event(enode, event) < 0)
return;
mutex_lock(&event_mutex);
file = find_event_file(tr, group, event);
if (!file) {
pr_err("Failed to find event: %s:%s\n", group, event);
goto out;
}
p = xbc_node_find_value(enode, "filter", NULL);
if (p && *p != '\0') {
if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
pr_err("filter string is too long: %s\n", p);
else if (apply_event_filter(file, buf) < 0)
pr_err("Failed to apply filter: %s\n", buf);
}
xbc_node_for_each_array_value(enode, "actions", anode, p) {
if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
pr_err("action string is too long: %s\n", p);
else if (trigger_process_regex(file, buf) < 0)
pr_err("Failed to apply an action: %s\n", buf);
}
if (xbc_node_find_value(enode, "enable", NULL)) {
if (trace_event_enable_disable(file, 1, 0) < 0)
pr_err("Failed to enable event node: %s:%s\n",
group, event);
}
out:
mutex_unlock(&event_mutex);
}
static void __init
trace_boot_init_events(struct trace_array *tr, struct xbc_node *node)
{
struct xbc_node *gnode, *enode;
node = xbc_node_find_child(node, "event");
if (!node)
return;
/* per-event key starts with "event.GROUP.EVENT" */
xbc_node_for_each_child(node, gnode)
xbc_node_for_each_child(gnode, enode)
trace_boot_init_one_event(tr, gnode, enode);
}
#else
#define trace_boot_enable_events(tr, node) do {} while (0)
#define trace_boot_init_events(tr, node) do {} while (0)
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
static void __init
trace_boot_set_ftrace_filter(struct trace_array *tr, struct xbc_node *node)
{
struct xbc_node *anode;
const char *p;
char *q;
xbc_node_for_each_array_value(node, "ftrace.filters", anode, p) {
q = kstrdup(p, GFP_KERNEL);
if (!q)
return;
if (ftrace_set_filter(tr->ops, q, strlen(q), 0) < 0)
pr_err("Failed to add %s to ftrace filter\n", p);
else
ftrace_filter_param = true;
kfree(q);
}
xbc_node_for_each_array_value(node, "ftrace.notraces", anode, p) {
q = kstrdup(p, GFP_KERNEL);
if (!q)
return;
if (ftrace_set_notrace(tr->ops, q, strlen(q), 0) < 0)
pr_err("Failed to add %s to ftrace filter\n", p);
else
ftrace_filter_param = true;
kfree(q);
}
}
#else
#define trace_boot_set_ftrace_filter(tr, node) do {} while (0)
#endif
static void __init
trace_boot_enable_tracer(struct trace_array *tr, struct xbc_node *node)
{
const char *p;
trace_boot_set_ftrace_filter(tr, node);
p = xbc_node_find_value(node, "tracer", NULL);
if (p && *p != '\0') {
if (tracing_set_tracer(tr, p) < 0)
pr_err("Failed to set given tracer: %s\n", p);
}
}
static void __init
trace_boot_init_one_instance(struct trace_array *tr, struct xbc_node *node)
{
trace_boot_set_instance_options(tr, node);
trace_boot_init_events(tr, node);
trace_boot_enable_events(tr, node);
trace_boot_enable_tracer(tr, node);
}
static void __init
trace_boot_init_instances(struct xbc_node *node)
{
struct xbc_node *inode;
struct trace_array *tr;
const char *p;
node = xbc_node_find_child(node, "instance");
if (!node)
return;
xbc_node_for_each_child(node, inode) {
p = xbc_node_get_data(inode);
if (!p || *p == '\0')
continue;
tr = trace_array_get_by_name(p);
if (!tr) {
pr_err("Failed to get trace instance %s\n", p);
continue;
}
trace_boot_init_one_instance(tr, inode);
trace_array_put(tr);
}
}
static int __init trace_boot_init(void)
{
struct xbc_node *trace_node;
struct trace_array *tr;
trace_node = xbc_find_node("ftrace");
if (!trace_node)
return 0;
tr = top_trace_array();
if (!tr)
return 0;
/* Global trace array is also one instance */
trace_boot_init_one_instance(tr, trace_node);
trace_boot_init_instances(trace_node);
return 0;
}
fs_initcall(trace_boot_init);
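The tree this initcall walks is the "ftrace" node of the boot configuration appended to the initrd. As a rough illustration only (the exact key syntax is defined by the bootconfig parser, and the probe, filter, and instance names below are invented), a configuration consumed by the code above could look like:

	ftrace.options = "sym-addr"
	ftrace.trace_clock = global
	ftrace.buffer_size = 1MB
	ftrace.event.kprobes.myprobe.probes = "vfs_read $arg1 $arg2"
	ftrace.event.kprobes.myprobe.enable
	ftrace.instance.foo.tracer = function
	ftrace.instance.foo.ftrace.filters = "user_*"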

View File

@@ -32,10 +32,10 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
{
struct trace_event_call *call = &event_branch;
struct trace_array *tr = branch_tracer;
struct trace_buffer *buffer;
struct trace_array_cpu *data;
struct ring_buffer_event *event;
struct trace_branch *entry;
struct ring_buffer *buffer;
unsigned long flags;
int pc;
const char *p;
@@ -55,12 +55,12 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
raw_local_irq_save(flags);
current->trace_recursion |= TRACE_BRANCH_BIT;
data = this_cpu_ptr(tr->trace_buffer.data);
data = this_cpu_ptr(tr->array_buffer.data);
if (atomic_read(&data->disabled))
goto out;
pc = preempt_count();
buffer = tr->trace_buffer.buffer;
buffer = tr->array_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
sizeof(*entry), flags, pc);
if (!event)

View File

@@ -223,3 +223,215 @@ static __init int init_dynamic_event(void)
return 0;
}
fs_initcall(init_dynamic_event);
/**
* dynevent_arg_add - Add an arg to a dynevent_cmd
* @cmd: A pointer to the dynevent_cmd struct representing the new event cmd
* @arg: The argument to append to the current cmd
* @check_arg: An (optional) pointer to a function checking arg sanity
*
* Append an argument to a dynevent_cmd. The argument string will be
* appended to the current cmd string, followed by a separator, if
* applicable. Before the argument is added, the @check_arg function,
* if present, will be used to check the sanity of the current arg
* string.
*
* The arg separator should be set via dynevent_arg_init(), and the
* argument string assigned to arg->str, before adding the arg with
* this function.
*
* Return: 0 if successful, error otherwise.
*/
int dynevent_arg_add(struct dynevent_cmd *cmd,
struct dynevent_arg *arg,
dynevent_check_arg_fn_t check_arg)
{
int ret = 0;
if (check_arg) {
ret = check_arg(arg);
if (ret)
return ret;
}
ret = seq_buf_printf(&cmd->seq, " %s%c", arg->str, arg->separator);
if (ret) {
pr_err("String is too long: %s%c\n", arg->str, arg->separator);
return -E2BIG;
}
return ret;
}
/**
* dynevent_arg_pair_add - Add an arg pair to a dynevent_cmd
* @cmd: A pointer to the dynevent_cmd struct representing the new event cmd
* @arg_pair: The argument pair to append to the current cmd
* @check_arg: An (optional) pointer to a function checking arg sanity
*
* Append an argument pair to a dynevent_cmd. An argument pair
* consists of a left-hand-side argument and a right-hand-side
* argument separated by an operator, which can be whitespace, all
* followed by a separator, if applicable. This can be used to add
* arguments of the form 'type variable_name;' or 'x+y'.
*
* The lhs argument string will be appended to the current cmd string,
* followed by an operator, if applicable, followed by the rhs string,
* followed finally by a separator, if applicable. Before the
* argument is added, the @check_arg function, if present, will be
* used to check the sanity of the current arg strings.
*
* The operator and separator should be set via
* dynevent_arg_pair_init(), and the lhs/rhs strings assigned, before
* adding the arg pair with this function.
*
* Return: 0 if successful, error otherwise.
*/
int dynevent_arg_pair_add(struct dynevent_cmd *cmd,
struct dynevent_arg_pair *arg_pair,
dynevent_check_arg_fn_t check_arg)
{
int ret = 0;
if (check_arg) {
ret = check_arg(arg_pair);
if (ret)
return ret;
}
ret = seq_buf_printf(&cmd->seq, " %s%c%s%c", arg_pair->lhs,
arg_pair->operator, arg_pair->rhs,
arg_pair->separator);
if (ret) {
pr_err("field string is too long: %s%c%s%c\n", arg_pair->lhs,
arg_pair->operator, arg_pair->rhs,
arg_pair->separator);
return -E2BIG;
}
return ret;
}
/**
* dynevent_str_add - Add a string to a dynevent_cmd
* @cmd: A pointer to the dynevent_cmd struct representing the new event cmd
* @str: The string to append to the current cmd
*
* Append a string to a dynevent_cmd. The string will be appended to
* the current cmd string as-is, with nothing prepended or appended.
*
* Return: 0 if successful, error otherwise.
*/
int dynevent_str_add(struct dynevent_cmd *cmd, const char *str)
{
int ret = 0;
ret = seq_buf_puts(&cmd->seq, str);
if (ret) {
pr_err("String is too long: %s\n", str);
return -E2BIG;
}
return ret;
}
/**
* dynevent_cmd_init - Initialize a dynevent_cmd object
* @cmd: A pointer to the dynevent_cmd struct representing the cmd
* @buf: A pointer to the buffer to generate the command into
* @maxlen: The length of the buffer the command will be generated into
* @type: The type of the cmd, checked against further operations
* @run_command: The type-specific function that will actually run the command
*
* Initialize a dynevent_cmd. A dynevent_cmd is used to build up and
* run dynamic event creation commands, such as commands for creating
* synthetic and kprobe events. Before calling any of the functions
* used to build the command, a dynevent_cmd object should be
* instantiated and initialized using this function.
*
* The initialization sets things up by saving a pointer to the
* user-supplied buffer and its length via the @buf and @maxlen
* params, and by saving the cmd-specific @type and @run_command
* params which are used to check subsequent dynevent_cmd operations
* and actually run the command when complete.
*/
void dynevent_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen,
enum dynevent_type type,
dynevent_create_fn_t run_command)
{
memset(cmd, '\0', sizeof(*cmd));
seq_buf_init(&cmd->seq, buf, maxlen);
cmd->type = type;
cmd->run_command = run_command;
}
/**
* dynevent_arg_init - Initialize a dynevent_arg object
* @arg: A pointer to the dynevent_arg struct representing the arg
* @separator: An (optional) separator, appended after adding the arg
*
* Initialize a dynevent_arg object. A dynevent_arg represents an
* object used to append single arguments to the current command
* string. After the arg string is successfully appended to the
* command string, the optional @separator is appended. If no
* separator was specified when initializing the arg, a space will be
* appended.
*/
void dynevent_arg_init(struct dynevent_arg *arg,
char separator)
{
memset(arg, '\0', sizeof(*arg));
if (!separator)
separator = ' ';
arg->separator = separator;
}
/**
* dynevent_arg_pair_init - Initialize a dynevent_arg_pair object
* @arg_pair: A pointer to the dynevent_arg_pair struct representing the arg
* @operator: An (optional) operator, appended after adding the first arg
* @separator: An (optional) separator, appended after adding the second arg
*
* Initialize a dynevent_arg_pair object. A dynevent_arg_pair
* represents an object used to append argument pairs such as 'type
* variable_name;' or 'x+y' to the current command string. An
* argument pair consists of a left-hand-side argument and a
* right-hand-side argument separated by an operator, which can be
* whitespace, all followed by a separator, if applicable. After the
* first arg string is successfully appended to the command string,
* the optional @operator is appended, followed by the second arg and
* an optional @separator. If no separator was specified when
* initializing the arg, a space will be appended.
*/
void dynevent_arg_pair_init(struct dynevent_arg_pair *arg_pair,
char operator, char separator)
{
memset(arg_pair, '\0', sizeof(*arg_pair));
if (!operator)
operator = ' ';
arg_pair->operator = operator;
if (!separator)
separator = ' ';
arg_pair->separator = separator;
}
/**
* dynevent_create - Create the dynamic event contained in dynevent_cmd
* @cmd: The dynevent_cmd object containing the dynamic event creation command
*
* Once a dynevent_cmd object has been successfully built up via the
* dynevent_cmd_init(), dynevent_arg_add() and dynevent_arg_pair_add()
* functions, this function runs the final command to actually create
* the event.
*
* Return: 0 if the event was successfully created, error otherwise.
*/
int dynevent_create(struct dynevent_cmd *cmd)
{
return cmd->run_command(cmd);
}
EXPORT_SYMBOL_GPL(dynevent_create);
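These helpers are normally driven through the type-specific wrappers (synth_event_cmd_init(), kprobe_event_cmd_init(), and the gen_cmd_start/end functions built on top of them). A minimal sketch of the low-level calling pattern, assuming a cmd already initialized by one of those wrappers and an invented field string:

	struct dynevent_arg arg;
	int ret;

	dynevent_arg_init(&arg, ';');
	arg.str = "u64 latency";	/* illustrative field, not from this patch */
	ret = dynevent_arg_add(cmd, &arg, NULL);
	if (!ret)
		ret = dynevent_create(cmd);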

View File

@@ -117,4 +117,36 @@ int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type);
#define for_each_dyn_event_safe(pos, n) \
list_for_each_entry_safe(pos, n, &dyn_event_list, list)
extern void dynevent_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen,
enum dynevent_type type,
dynevent_create_fn_t run_command);
typedef int (*dynevent_check_arg_fn_t)(void *data);
struct dynevent_arg {
const char *str;
char separator; /* e.g. ';', ',', or nothing */
};
extern void dynevent_arg_init(struct dynevent_arg *arg,
char separator);
extern int dynevent_arg_add(struct dynevent_cmd *cmd,
struct dynevent_arg *arg,
dynevent_check_arg_fn_t check_arg);
struct dynevent_arg_pair {
const char *lhs;
const char *rhs;
char operator; /* e.g. '=' or nothing */
char separator; /* e.g. ';', ',', or nothing */
};
extern void dynevent_arg_pair_init(struct dynevent_arg_pair *arg_pair,
char operator, char separator);
extern int dynevent_arg_pair_add(struct dynevent_cmd *cmd,
struct dynevent_arg_pair *arg_pair,
dynevent_check_arg_fn_t check_arg);
extern int dynevent_str_add(struct dynevent_cmd *cmd, const char *str);
#endif

View File

@@ -164,7 +164,7 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
F_STRUCT(
__field( int, size )
__dynamic_array(unsigned long, caller )
__array( unsigned long, caller, FTRACE_STACK_ENTRIES )
),
F_printk("\t=> %ps\n\t=> %ps\n\t=> %ps\n"

View File

@@ -238,7 +238,7 @@ bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
if (!pid_list)
return false;
data = this_cpu_ptr(tr->trace_buffer.data);
data = this_cpu_ptr(tr->array_buffer.data);
return data->ignore_pid;
}
@@ -273,6 +273,7 @@ void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
if (!fbuffer->event)
return NULL;
fbuffer->regs = NULL;
fbuffer->entry = ring_buffer_event_data(fbuffer->event);
return fbuffer->entry;
}
@@ -547,7 +548,7 @@ event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
pid_list = rcu_dereference_sched(tr->filtered_pids);
this_cpu_write(tr->trace_buffer.data->ignore_pid,
this_cpu_write(tr->array_buffer.data->ignore_pid,
trace_ignore_this_task(pid_list, prev) &&
trace_ignore_this_task(pid_list, next));
}
@@ -561,7 +562,7 @@ event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
pid_list = rcu_dereference_sched(tr->filtered_pids);
this_cpu_write(tr->trace_buffer.data->ignore_pid,
this_cpu_write(tr->array_buffer.data->ignore_pid,
trace_ignore_this_task(pid_list, next));
}
@@ -572,12 +573,12 @@ event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
struct trace_pid_list *pid_list;
/* Nothing to do if we are already tracing */
if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
if (!this_cpu_read(tr->array_buffer.data->ignore_pid))
return;
pid_list = rcu_dereference_sched(tr->filtered_pids);
this_cpu_write(tr->trace_buffer.data->ignore_pid,
this_cpu_write(tr->array_buffer.data->ignore_pid,
trace_ignore_this_task(pid_list, task));
}
@@ -588,13 +589,13 @@ event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
struct trace_pid_list *pid_list;
/* Nothing to do if we are not tracing */
if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
if (this_cpu_read(tr->array_buffer.data->ignore_pid))
return;
pid_list = rcu_dereference_sched(tr->filtered_pids);
/* Set tracing if current is enabled */
this_cpu_write(tr->trace_buffer.data->ignore_pid,
this_cpu_write(tr->array_buffer.data->ignore_pid,
trace_ignore_this_task(pid_list, current));
}
@@ -626,7 +627,7 @@ static void __ftrace_clear_event_pids(struct trace_array *tr)
}
for_each_possible_cpu(cpu)
per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;
per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false;
rcu_assign_pointer(tr->filtered_pids, NULL);
@@ -1595,7 +1596,7 @@ static void ignore_task_cpu(void *data)
pid_list = rcu_dereference_protected(tr->filtered_pids,
mutex_is_locked(&event_mutex));
this_cpu_write(tr->trace_buffer.data->ignore_pid,
this_cpu_write(tr->array_buffer.data->ignore_pid,
trace_ignore_this_task(pid_list, current));
}
@@ -2553,6 +2554,91 @@ find_event_file(struct trace_array *tr, const char *system, const char *event)
return file;
}
/**
* trace_get_event_file - Find and return a trace event file
* @instance: The name of the trace instance containing the event
* @system: The name of the system containing the event
* @event: The name of the event
*
* Return a trace event file given the trace instance name, trace
* system, and trace event name. If the instance name is NULL, it
* refers to the top-level trace array.
*
* This function will look it up and return it if found, after calling
* trace_array_get() to prevent the instance from going away, and
* increment the event's module refcount to prevent it from being
* removed.
*
* To release the file, call trace_put_event_file(), which will call
* trace_array_put() and decrement the event's module refcount.
*
* Return: The trace event on success, ERR_PTR otherwise.
*/
struct trace_event_file *trace_get_event_file(const char *instance,
const char *system,
const char *event)
{
struct trace_array *tr = top_trace_array();
struct trace_event_file *file = NULL;
int ret = -EINVAL;
if (instance) {
tr = trace_array_find_get(instance);
if (!tr)
return ERR_PTR(-ENOENT);
} else {
ret = trace_array_get(tr);
if (ret)
return ERR_PTR(ret);
}
mutex_lock(&event_mutex);
file = find_event_file(tr, system, event);
if (!file) {
trace_array_put(tr);
ret = -EINVAL;
goto out;
}
/* Don't let event modules unload while in use */
ret = try_module_get(file->event_call->mod);
if (!ret) {
trace_array_put(tr);
ret = -EBUSY;
goto out;
}
ret = 0;
out:
mutex_unlock(&event_mutex);
if (ret)
file = ERR_PTR(ret);
return file;
}
EXPORT_SYMBOL_GPL(trace_get_event_file);
/**
* trace_put_event_file - Release a file from trace_get_event_file()
* @file: The trace event file
*
* If a file was retrieved using trace_get_event_file(), this should
* be called when it's no longer needed. It will cancel the previous
* trace_array_get() called by that function, and decrement the
* event's module refcount.
*/
void trace_put_event_file(struct trace_event_file *file)
{
mutex_lock(&event_mutex);
module_put(file->event_call->mod);
mutex_unlock(&event_mutex);
trace_array_put(file->tr);
}
EXPORT_SYMBOL_GPL(trace_put_event_file);
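/*
 * A minimal sketch of the intended get/use/put pattern (illustrative
 * only, not part of this patch; "sched"/"sched_switch" stand in for
 * whatever event a module actually cares about):
 *
 *	struct trace_event_file *file;
 *
 *	file = trace_get_event_file(NULL, "sched", "sched_switch");
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *
 *	ret = trace_array_set_clr_event(file->tr, "sched",
 *					"sched_switch", true);
 *	...
 *	trace_put_event_file(file);
 */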
#ifdef CONFIG_DYNAMIC_FTRACE
/* Avoid typos */
@@ -3409,8 +3495,8 @@ static void __init
function_test_events_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
{
struct trace_buffer *buffer;
struct ring_buffer_event *event;
struct ring_buffer *buffer;
struct ftrace_entry *entry;
unsigned long flags;
long disabled;

File diff suppressed because it is too large

View File

@@ -116,9 +116,10 @@ static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
struct trace_event_file *event_file = event_file_data(m->private);
if (t == SHOW_AVAILABLE_TRIGGERS)
if (t == SHOW_AVAILABLE_TRIGGERS) {
(*pos)++;
return NULL;
}
return seq_list_next(t, &event_file->triggers, pos);
}
@@ -213,7 +214,7 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file)
return ret;
}
static int trigger_process_regex(struct trace_event_file *file, char *buff)
int trigger_process_regex(struct trace_event_file *file, char *buff)
{
char *command, *next = buff;
struct event_command *p;

View File

@@ -101,7 +101,7 @@ static int function_trace_init(struct trace_array *tr)
ftrace_init_array_ops(tr, func);
tr->trace_buffer.cpu = get_cpu();
tr->array_buffer.cpu = get_cpu();
put_cpu();
tracing_start_cmdline_record();
@@ -118,7 +118,7 @@ static void function_trace_reset(struct trace_array *tr)
static void function_trace_start(struct trace_array *tr)
{
tracing_reset_online_cpus(&tr->trace_buffer);
tracing_reset_online_cpus(&tr->array_buffer);
}
static void
@@ -143,7 +143,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
goto out;
cpu = smp_processor_id();
data = per_cpu_ptr(tr->trace_buffer.data, cpu);
data = per_cpu_ptr(tr->array_buffer.data, cpu);
if (!atomic_read(&data->disabled)) {
local_save_flags(flags);
trace_function(tr, ip, parent_ip, flags, pc);
@@ -192,7 +192,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
*/
local_irq_save(flags);
cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->trace_buffer.data, cpu);
data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {

View File

@@ -101,7 +101,7 @@ int __trace_graph_entry(struct trace_array *tr,
{
struct trace_event_call *call = &event_funcgraph_entry;
struct ring_buffer_event *event;
struct ring_buffer *buffer = tr->trace_buffer.buffer;
struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ftrace_graph_ent_entry *entry;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -171,7 +171,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
local_irq_save(flags);
cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->trace_buffer.data, cpu);
data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
pc = preempt_count();
@@ -221,7 +221,7 @@ void __trace_graph_return(struct trace_array *tr,
{
struct trace_event_call *call = &event_funcgraph_exit;
struct ring_buffer_event *event;
struct ring_buffer *buffer = tr->trace_buffer.buffer;
struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ftrace_graph_ret_entry *entry;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
@@ -252,7 +252,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
local_irq_save(flags);
cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->trace_buffer.data, cpu);
data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
pc = preempt_count();
@@ -444,9 +444,9 @@ get_return_for_leaf(struct trace_iterator *iter,
* We need to consume the current entry to see
* the next one.
*/
ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
NULL, NULL);
event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
NULL, NULL);
}
@@ -503,7 +503,7 @@ print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
{
unsigned long long usecs;
usecs = iter->ts - iter->trace_buffer->time_start;
usecs = iter->ts - iter->array_buffer->time_start;
do_div(usecs, NSEC_PER_USEC);
trace_seq_printf(s, "%9llu us | ", usecs);

View File

@@ -104,7 +104,7 @@ static void trace_hwlat_sample(struct hwlat_sample *sample)
{
struct trace_array *tr = hwlat_trace;
struct trace_event_call *call = &event_hwlat;
struct ring_buffer *buffer = tr->trace_buffer.buffer;
struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event;
struct hwlat_entry *entry;
unsigned long flags;

View File

@@ -122,7 +122,7 @@ static int func_prolog_dec(struct trace_array *tr,
if (!irqs_disabled_flags(*flags) && !preempt_count())
return 0;
*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
*data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&(*data)->disabled);
if (likely(disabled == 1))
@@ -167,7 +167,7 @@ static int irqsoff_display_graph(struct trace_array *tr, int set)
per_cpu(tracing_cpu, cpu) = 0;
tr->max_latency = 0;
tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);
tracing_reset_online_cpus(&irqsoff_trace->array_buffer);
return start_irqsoff_tracer(irqsoff_trace, set);
}
@@ -382,7 +382,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
if (per_cpu(tracing_cpu, cpu))
return;
data = per_cpu_ptr(tr->trace_buffer.data, cpu);
data = per_cpu_ptr(tr->array_buffer.data, cpu);
if (unlikely(!data) || atomic_read(&data->disabled))
return;
@@ -420,7 +420,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
if (!tracer_enabled || !tracing_is_enabled())
return;
data = per_cpu_ptr(tr->trace_buffer.data, cpu);
data = per_cpu_ptr(tr->array_buffer.data, cpu);
if (unlikely(!data) ||
!data->critical_start || atomic_read(&data->disabled))

View File

@@ -43,7 +43,7 @@ static void ftrace_dump_buf(int skip_entries, long cpu_file)
if (cpu_file == RING_BUFFER_ALL_CPUS) {
for_each_tracing_cpu(cpu) {
iter.buffer_iter[cpu] =
ring_buffer_read_prepare(iter.trace_buffer->buffer,
ring_buffer_read_prepare(iter.array_buffer->buffer,
cpu, GFP_ATOMIC);
ring_buffer_read_start(iter.buffer_iter[cpu]);
tracing_iter_reset(&iter, cpu);
@@ -51,7 +51,7 @@ static void ftrace_dump_buf(int skip_entries, long cpu_file)
} else {
iter.cpu_file = cpu_file;
iter.buffer_iter[cpu_file] =
ring_buffer_read_prepare(iter.trace_buffer->buffer,
ring_buffer_read_prepare(iter.array_buffer->buffer,
cpu_file, GFP_ATOMIC);
ring_buffer_read_start(iter.buffer_iter[cpu_file]);
tracing_iter_reset(&iter, cpu_file);
@@ -124,7 +124,7 @@ static int kdb_ftdump(int argc, const char **argv)
iter.buffer_iter = buffer_iter;
for_each_tracing_cpu(cpu) {
atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
}
/* A negative skip_entries means skip all but the last entries */
@@ -139,7 +139,7 @@ static int kdb_ftdump(int argc, const char **argv)
ftrace_dump_buf(skip_entries, cpu_file);
for_each_tracing_cpu(cpu) {
atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
}
kdb_trap_printk--;


@@ -22,7 +22,6 @@
#define KPROBE_EVENT_SYSTEM "kprobes"
#define KRETPROBE_MAXACTIVE_MAX 4096
#define MAX_KPROBE_CMDLINE_SIZE 1024
/* Kprobe early definition from command line */
static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
@@ -902,6 +901,167 @@ static int create_or_delete_trace_kprobe(int argc, char **argv)
return ret == -ECANCELED ? -EINVAL : ret;
}
static int trace_kprobe_run_command(struct dynevent_cmd *cmd)
{
return trace_run_command(cmd->seq.buffer, create_or_delete_trace_kprobe);
}
/**
* kprobe_event_cmd_init - Initialize a kprobe event command object
* @cmd: A pointer to the dynevent_cmd struct representing the new event
* @buf: A pointer to the buffer used to build the command
* @maxlen: The length of the buffer passed in @buf
*
 * Initialize a kprobe event command object. Use this before
* calling any of the other kprobe_event functions.
*/
void kprobe_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
{
dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_KPROBE,
trace_kprobe_run_command);
}
EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
/**
* __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
* @cmd: A pointer to the dynevent_cmd struct representing the new event
* @name: The name of the kprobe event
* @loc: The location of the kprobe event
* @kretprobe: Is this a return probe?
 * @args: Variable number of field strings, one for each field
*
* NOTE: Users normally won't want to call this function directly, but
* rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
* adds a NULL to the end of the arg list. If this function is used
* directly, make sure the last arg in the variable arg list is NULL.
*
* Generate a kprobe event command to be executed by
* kprobe_event_gen_cmd_end(). This function can be used to generate the
* complete command or only the first part of it; in the latter case,
* kprobe_event_add_fields() can be used to add more fields following this.
*
* Return: 0 if successful, error otherwise.
*/
int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
const char *name, const char *loc, ...)
{
char buf[MAX_EVENT_NAME_LEN];
struct dynevent_arg arg;
va_list args;
int ret;
if (cmd->type != DYNEVENT_TYPE_KPROBE)
return -EINVAL;
if (kretprobe)
snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
else
snprintf(buf, MAX_EVENT_NAME_LEN, "p:kprobes/%s", name);
ret = dynevent_str_add(cmd, buf);
if (ret)
return ret;
dynevent_arg_init(&arg, 0);
arg.str = loc;
ret = dynevent_arg_add(cmd, &arg, NULL);
if (ret)
return ret;
va_start(args, loc);
for (;;) {
const char *field;
field = va_arg(args, const char *);
if (!field)
break;
if (++cmd->n_fields > MAX_TRACE_ARGS) {
ret = -EINVAL;
break;
}
arg.str = field;
ret = dynevent_arg_add(cmd, &arg, NULL);
if (ret)
break;
}
va_end(args);
return ret;
}
EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
/**
* __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
* @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @args: Variable number of field strings, one for each field
*
* NOTE: Users normally won't want to call this function directly, but
* rather use the kprobe_event_add_fields() wrapper, which
* automatically adds a NULL to the end of the arg list. If this
* function is used directly, make sure the last arg in the variable
* arg list is NULL.
*
* Add probe fields to an existing kprobe command using a variable
* list of args. Fields are added in the same order they're listed.
*
* Return: 0 if successful, error otherwise.
*/
int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...)
{
struct dynevent_arg arg;
va_list args;
int ret;
if (cmd->type != DYNEVENT_TYPE_KPROBE)
return -EINVAL;
dynevent_arg_init(&arg, 0);
va_start(args, cmd);
for (;;) {
const char *field;
field = va_arg(args, const char *);
if (!field)
break;
if (++cmd->n_fields > MAX_TRACE_ARGS) {
ret = -EINVAL;
break;
}
arg.str = field;
ret = dynevent_arg_add(cmd, &arg, NULL);
if (ret)
break;
}
va_end(args);
return ret;
}
EXPORT_SYMBOL_GPL(__kprobe_event_add_fields);
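
Not part of the patch itself, but for orientation: the command-building API documented above is meant to be driven from kernel code roughly as in the sketch below. It assumes the kprobe_event_gen_cmd_start()/kprobe_event_add_fields()/kprobe_event_gen_cmd_end() wrappers are visible to the caller (assumed here to come from <linux/trace_events.h>), and the probe location and fetch-arg strings ("do_sys_open", "dfd=%ax", ...) as well as the function and buffer names are purely illustrative.

#include <linux/trace_events.h>	/* assumed to declare the dynevent/kprobe gen API */

static char example_cmd_buf[2048];	/* the command is assembled as text here */

static int example_gen_kprobe_event(void)
{
	struct dynevent_cmd cmd;
	int ret;

	/* Bind the command object to the caller-supplied buffer. */
	kprobe_event_cmd_init(&cmd, example_cmd_buf, sizeof(example_cmd_buf));

	/*
	 * Start a "p:kprobes/example_open" command with the first two
	 * fields; the wrapper NULL-terminates the variable arg list.
	 */
	ret = kprobe_event_gen_cmd_start(&cmd, "example_open", "do_sys_open",
					 "dfd=%ax", "filename=%dx");
	if (ret)
		return ret;

	/* Further fields may be appended before the command is run. */
	ret = kprobe_event_add_fields(&cmd, "flags=%cx", "mode=+4($stack)");
	if (ret)
		return ret;

	/* Parse the assembled string and register the kprobe event. */
	return kprobe_event_gen_cmd_end(&cmd);
}

A one-shot caller can instead pass every field to kprobe_event_gen_cmd_start(); splitting the build across kprobe_event_add_fields() mirrors the two-part generation the comments above allow for.
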
/**
* kprobe_event_delete - Delete a kprobe event
* @name: The name of the kprobe event to delete
*
 * Delete a kprobe event with the given @name from kernel code rather
* than directly from the command line.
*
* Return: 0 if successful, error otherwise.
*/
int kprobe_event_delete(const char *name)
{
char buf[MAX_EVENT_NAME_LEN];
snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name);
return trace_run_command(buf, create_or_delete_trace_kprobe);
}
EXPORT_SYMBOL_GPL(kprobe_event_delete);
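
Likewise not part of the patch: a minimal teardown sketch for the delete helper above. The name is formatted into a "-:<name>" command internally, so the caller passes the event name only; "example_open" continues the hypothetical event from the previous sketch.

static void example_remove_kprobe_event(void)
{
	int ret;

	/* Runs the equivalent of "-:example_open" through the same
	 * create-or-delete handler used by the text interface. */
	ret = kprobe_event_delete("example_open");
	if (ret)
		pr_warn("could not delete example_open kprobe event: %d\n", ret);
}
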
static int trace_kprobe_release(struct dyn_event *ev)
{
struct trace_kprobe *tk = to_trace_kprobe(ev);
@@ -1175,35 +1335,35 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
struct trace_event_file *trace_file)
{
struct kprobe_trace_entry_head *entry;
struct ring_buffer_event *event;
struct ring_buffer *buffer;
int size, dsize, pc;
unsigned long irq_flags;
struct trace_event_call *call = trace_probe_event_call(&tk->tp);
struct trace_event_buffer fbuffer;
int dsize;
WARN_ON(call != trace_file->event_call);
if (trace_trigger_soft_disabled(trace_file))
return;
local_save_flags(irq_flags);
pc = preempt_count();
local_save_flags(fbuffer.flags);
fbuffer.pc = preempt_count();
fbuffer.trace_file = trace_file;
dsize = __get_data_size(&tk->tp, regs);
size = sizeof(*entry) + tk->tp.size + dsize;
event = trace_event_buffer_lock_reserve(&buffer, trace_file,
call->event.type,
size, irq_flags, pc);
if (!event)
fbuffer.event =
trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
call->event.type,
sizeof(*entry) + tk->tp.size + dsize,
fbuffer.flags, fbuffer.pc);
if (!fbuffer.event)
return;
entry = ring_buffer_event_data(event);
fbuffer.regs = regs;
entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
entry->ip = (unsigned long)tk->rp.kp.addr;
store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
event_trigger_unlock_commit_regs(trace_file, buffer, event,
entry, irq_flags, pc, regs);
trace_event_buffer_commit(&fbuffer);
}
static void
@@ -1223,36 +1383,35 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
struct trace_event_file *trace_file)
{
struct kretprobe_trace_entry_head *entry;
struct ring_buffer_event *event;
struct ring_buffer *buffer;
int size, pc, dsize;
unsigned long irq_flags;
struct trace_event_buffer fbuffer;
struct trace_event_call *call = trace_probe_event_call(&tk->tp);
int dsize;
WARN_ON(call != trace_file->event_call);
if (trace_trigger_soft_disabled(trace_file))
return;
local_save_flags(irq_flags);
pc = preempt_count();
local_save_flags(fbuffer.flags);
fbuffer.pc = preempt_count();
fbuffer.trace_file = trace_file;
dsize = __get_data_size(&tk->tp, regs);
size = sizeof(*entry) + tk->tp.size + dsize;
event = trace_event_buffer_lock_reserve(&buffer, trace_file,
call->event.type,
size, irq_flags, pc);
if (!event)
fbuffer.event =
trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
call->event.type,
sizeof(*entry) + tk->tp.size + dsize,
fbuffer.flags, fbuffer.pc);
if (!fbuffer.event)
return;
entry = ring_buffer_event_data(event);
fbuffer.regs = regs;
entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
entry->func = (unsigned long)tk->rp.kp.addr;
entry->ret_ip = (unsigned long)ri->ret_addr;
store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
event_trigger_unlock_commit_regs(trace_file, buffer, event,
entry, irq_flags, pc, regs);
trace_event_buffer_commit(&fbuffer);
}
static void
@@ -1698,11 +1857,12 @@ static __init void setup_boot_kprobe_events(void)
enable_boot_kprobe_events();
}
/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
/*
 * Register dynevent at subsys_initcall. This allows the kernel to set up
 * kprobe events in fs_initcall without depending on tracefs.
*/
static __init int init_kprobe_trace_early(void)
{
struct dentry *d_tracer;
struct dentry *entry;
int ret;
ret = dyn_event_register(&trace_kprobe_ops);
@@ -1712,6 +1872,16 @@ static __init int init_kprobe_trace(void)
if (register_module_notifier(&trace_kprobe_module_nb))
return -EINVAL;
return 0;
}
subsys_initcall(init_kprobe_trace_early);
/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
struct dentry *d_tracer;
struct dentry *entry;
d_tracer = tracing_init_dentry();
if (IS_ERR(d_tracer))
return 0;


@@ -32,7 +32,7 @@ static void mmio_reset_data(struct trace_array *tr)
overrun_detected = false;
prev_overruns = 0;
tracing_reset_online_cpus(&tr->trace_buffer);
tracing_reset_online_cpus(&tr->array_buffer);
}
static int mmio_trace_init(struct trace_array *tr)
@@ -122,7 +122,7 @@ static void mmio_close(struct trace_iterator *iter)
static unsigned long count_overruns(struct trace_iterator *iter)
{
unsigned long cnt = atomic_xchg(&dropped_count, 0);
unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
unsigned long over = ring_buffer_overruns(iter->array_buffer->buffer);
if (over > prev_overruns)
cnt += over - prev_overruns;
@@ -297,7 +297,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
struct mmiotrace_rw *rw)
{
struct trace_event_call *call = &event_mmiotrace_rw;
struct ring_buffer *buffer = tr->trace_buffer.buffer;
struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event;
struct trace_mmiotrace_rw *entry;
int pc = preempt_count();
@@ -318,7 +318,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
void mmio_trace_rw(struct mmiotrace_rw *rw)
{
struct trace_array *tr = mmio_trace_array;
struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
struct trace_array_cpu *data = per_cpu_ptr(tr->array_buffer.data, smp_processor_id());
__trace_mmiotrace_rw(tr, data, rw);
}
@@ -327,7 +327,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
struct mmiotrace_map *map)
{
struct trace_event_call *call = &event_mmiotrace_map;
struct ring_buffer *buffer = tr->trace_buffer.buffer;
struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event;
struct trace_mmiotrace_map *entry;
int pc = preempt_count();
@@ -351,7 +351,7 @@ void mmio_trace_mapping(struct mmiotrace_map *map)
struct trace_array_cpu *data;
preempt_disable();
data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
data = per_cpu_ptr(tr->array_buffer.data, smp_processor_id());
__trace_mmiotrace_map(tr, data, map);
preempt_enable();
}


@@ -538,7 +538,7 @@ lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
struct trace_array *tr = iter->tr;
unsigned long verbose = tr->trace_flags & TRACE_ITER_VERBOSE;
unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start;
unsigned long long abs_ts = iter->ts - iter->array_buffer->time_start;
unsigned long long rel_ts = next_ts - iter->ts;
struct trace_seq *s = &iter->seq;


@@ -89,8 +89,10 @@ static void tracing_sched_unregister(void)
static void tracing_start_sched_switch(int ops)
{
bool sched_register = (!sched_cmdline_ref && !sched_tgid_ref);
bool sched_register;
mutex_lock(&sched_register_mutex);
sched_register = (!sched_cmdline_ref && !sched_tgid_ref);
switch (ops) {
case RECORD_CMDLINE:


@@ -82,7 +82,7 @@ func_prolog_preempt_disable(struct trace_array *tr,
if (cpu != wakeup_current_cpu)
goto out_enable;
*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
*data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&(*data)->disabled);
if (unlikely(disabled != 1))
goto out;
@@ -378,7 +378,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
unsigned long flags, int pc)
{
struct trace_event_call *call = &event_context_switch;
struct ring_buffer *buffer = tr->trace_buffer.buffer;
struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event;
struct ctx_switch_entry *entry;
@@ -408,7 +408,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
struct trace_event_call *call = &event_wakeup;
struct ring_buffer_event *event;
struct ctx_switch_entry *entry;
struct ring_buffer *buffer = tr->trace_buffer.buffer;
struct trace_buffer *buffer = tr->array_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
sizeof(*entry), flags, pc);
@@ -459,7 +459,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
/* disable local data, not wakeup_cpu data */
cpu = raw_smp_processor_id();
disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
if (likely(disabled != 1))
goto out;
@@ -471,7 +471,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
goto out_unlock;
/* The task we are waiting for is waking up */
data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
@@ -494,7 +494,7 @@ out_unlock:
arch_spin_unlock(&wakeup_lock);
local_irq_restore(flags);
out:
atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
}
static void __wakeup_reset(struct trace_array *tr)
@@ -513,7 +513,7 @@ static void wakeup_reset(struct trace_array *tr)
{
unsigned long flags;
tracing_reset_online_cpus(&tr->trace_buffer);
tracing_reset_online_cpus(&tr->array_buffer);
local_irq_save(flags);
arch_spin_lock(&wakeup_lock);
@@ -551,7 +551,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
return;
pc = preempt_count();
disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
if (unlikely(disabled != 1))
goto out;
@@ -583,7 +583,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
local_save_flags(flags);
data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
data->preempt_timestamp = ftrace_now(cpu);
tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
__trace_stack(wakeup_trace, flags, 0, pc);
@@ -598,7 +598,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
out_locked:
arch_spin_unlock(&wakeup_lock);
out:
atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
}
static void start_wakeup_tracer(struct trace_array *tr)


@@ -23,7 +23,7 @@ static inline int trace_valid_entry(struct trace_entry *entry)
return 0;
}
static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
{
struct ring_buffer_event *event;
struct trace_entry *entry;
@@ -60,7 +60,7 @@ static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
* Test the trace buffer to see if all the elements
* are still sane.
*/
static int __maybe_unused trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
{
unsigned long flags, cnt = 0;
int cpu, ret = 0;
@@ -362,7 +362,7 @@ static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
msleep(100);
/* we should have nothing in the buffer */
ret = trace_test_buffer(&tr->trace_buffer, &count);
ret = trace_test_buffer(&tr->array_buffer, &count);
if (ret)
goto out;
@@ -383,7 +383,7 @@ static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
ftrace_enabled = 0;
/* check the trace buffer */
ret = trace_test_buffer(&tr->trace_buffer, &count);
ret = trace_test_buffer(&tr->array_buffer, &count);
ftrace_enabled = 1;
tracing_start();
@@ -682,7 +682,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
ftrace_enabled = 0;
/* check the trace buffer */
ret = trace_test_buffer(&tr->trace_buffer, &count);
ret = trace_test_buffer(&tr->array_buffer, &count);
ftrace_enabled = 1;
trace->reset(tr);
@@ -768,7 +768,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
* Simulate the init() callback but we attach a watchdog callback
* to detect and recover from possible hangs
*/
tracing_reset_online_cpus(&tr->trace_buffer);
tracing_reset_online_cpus(&tr->array_buffer);
set_graph_array(tr);
ret = register_ftrace_graph(&fgraph_ops);
if (ret) {
@@ -790,7 +790,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
tracing_stop();
/* check the trace buffer */
ret = trace_test_buffer(&tr->trace_buffer, &count);
ret = trace_test_buffer(&tr->array_buffer, &count);
/* Need to also simulate the tr->reset to remove this fgraph_ops */
tracing_stop_cmdline_record();
@@ -848,7 +848,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
ret = trace_test_buffer(&tr->trace_buffer, NULL);
ret = trace_test_buffer(&tr->array_buffer, NULL);
if (!ret)
ret = trace_test_buffer(&tr->max_buffer, &count);
trace->reset(tr);
@@ -910,7 +910,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
ret = trace_test_buffer(&tr->trace_buffer, NULL);
ret = trace_test_buffer(&tr->array_buffer, NULL);
if (!ret)
ret = trace_test_buffer(&tr->max_buffer, &count);
trace->reset(tr);
@@ -976,7 +976,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
ret = trace_test_buffer(&tr->trace_buffer, NULL);
ret = trace_test_buffer(&tr->array_buffer, NULL);
if (ret)
goto out;
@@ -1006,7 +1006,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
ret = trace_test_buffer(&tr->trace_buffer, NULL);
ret = trace_test_buffer(&tr->array_buffer, NULL);
if (ret)
goto out;
@@ -1136,7 +1136,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
ret = trace_test_buffer(&tr->trace_buffer, NULL);
ret = trace_test_buffer(&tr->array_buffer, NULL);
if (!ret)
ret = trace_test_buffer(&tr->max_buffer, &count);
@@ -1177,7 +1177,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
/* stop the tracing. */
tracing_stop();
/* check the trace buffer */
ret = trace_test_buffer(&tr->trace_buffer, &count);
ret = trace_test_buffer(&tr->array_buffer, &count);
trace->reset(tr);
tracing_start();


@@ -30,9 +30,6 @@
/* How much buffer is left on the trace_seq? */
#define TRACE_SEQ_BUF_LEFT(s) seq_buf_buffer_left(&(s)->seq)
/* How much buffer is written? */
#define TRACE_SEQ_BUF_USED(s) seq_buf_used(&(s)->seq)
/*
* trace_seq should work with being initialized with 0s.
*/


@@ -280,18 +280,22 @@ static int tracing_stat_init(void)
d_tracing = tracing_init_dentry();
if (IS_ERR(d_tracing))
return 0;
return -ENODEV;
stat_dir = tracefs_create_dir("trace_stat", d_tracing);
if (!stat_dir)
if (!stat_dir) {
pr_warn("Could not create tracefs 'trace_stat' entry\n");
return -ENOMEM;
}
return 0;
}
static int init_stat_file(struct stat_session *session)
{
if (!stat_dir && tracing_stat_init())
return -ENODEV;
int ret;
if (!stat_dir && (ret = tracing_stat_init()))
return ret;
session->file = tracefs_create_file(session->ts->name, 0644,
stat_dir,
@@ -304,7 +308,7 @@ static int init_stat_file(struct stat_session *session)
int register_stat_tracer(struct tracer_stat *trace)
{
struct stat_session *session, *node;
int ret;
int ret = -EINVAL;
if (!trace)
return -EINVAL;
@@ -315,17 +319,15 @@ int register_stat_tracer(struct tracer_stat *trace)
/* Already registered? */
mutex_lock(&all_stat_sessions_mutex);
list_for_each_entry(node, &all_stat_sessions, session_list) {
if (node->ts == trace) {
mutex_unlock(&all_stat_sessions_mutex);
return -EINVAL;
}
if (node->ts == trace)
goto out;
}
mutex_unlock(&all_stat_sessions_mutex);
ret = -ENOMEM;
/* Init the session */
session = kzalloc(sizeof(*session), GFP_KERNEL);
if (!session)
return -ENOMEM;
goto out;
session->ts = trace;
INIT_LIST_HEAD(&session->session_list);
@@ -334,15 +336,16 @@ int register_stat_tracer(struct tracer_stat *trace)
ret = init_stat_file(session);
if (ret) {
destroy_session(session);
return ret;
goto out;
}
ret = 0;
/* Register */
mutex_lock(&all_stat_sessions_mutex);
list_add_tail(&session->session_list, &all_stat_sessions);
out:
mutex_unlock(&all_stat_sessions_mutex);
return 0;
return ret;
}
void unregister_stat_tracer(struct tracer_stat *trace)


@@ -297,7 +297,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
struct syscall_trace_enter *entry;
struct syscall_metadata *sys_data;
struct ring_buffer_event *event;
struct ring_buffer *buffer;
struct trace_buffer *buffer;
unsigned long irq_flags;
unsigned long args[6];
int pc;
@@ -325,7 +325,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
local_save_flags(irq_flags);
pc = preempt_count();
buffer = tr->trace_buffer.buffer;
buffer = tr->array_buffer.buffer;
event = trace_buffer_lock_reserve(buffer,
sys_data->enter_event->event.type, size, irq_flags, pc);
if (!event)
@@ -347,7 +347,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
struct syscall_trace_exit *entry;
struct syscall_metadata *sys_data;
struct ring_buffer_event *event;
struct ring_buffer *buffer;
struct trace_buffer *buffer;
unsigned long irq_flags;
int pc;
int syscall_nr;
@@ -371,7 +371,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
local_save_flags(irq_flags);
pc = preempt_count();
buffer = tr->trace_buffer.buffer;
buffer = tr->array_buffer.buffer;
event = trace_buffer_lock_reserve(buffer,
sys_data->exit_event->event.type, sizeof(*entry),
irq_flags, pc);


@@ -931,8 +931,8 @@ static void __uprobe_trace_func(struct trace_uprobe *tu,
struct trace_event_file *trace_file)
{
struct uprobe_trace_entry_head *entry;
struct trace_buffer *buffer;
struct ring_buffer_event *event;
struct ring_buffer *buffer;
void *data;
int size, esize;
struct trace_event_call *call = trace_probe_event_call(&tu->tp);