tracing: Kill unused and puzzled sample code in ftrace.h
While doing some per-cpu helper optimization work, I found this code puzzling:

1. It is commented out, presumably left as a sample function for guidance or as TODO work.

2. The sample is stale: it refers to struct perf_trace_buf, which no longer exists. Its definition was deleted by commit ce71b9:

    Author: Frederic Weisbecker <fweisbec@gmail.com>
    Date:   Sun Nov 22 05:26:55 2009 +0100

        tracing: Use the perf recursion protection from trace event

There is no reason to keep it around. Compile tested only.

Link: http://lkml.kernel.org/r/50949FC9.6050202@gmail.com

Signed-off-by: Shan Wei <davidshan@tencent.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
This commit is contained in:
parent 11043d8b12
commit 1c7d667324
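The structure the message calls nonexistent can only be inferred here from how the removed sample code uses it (per_cpu_ptr(trace_buf, __cpu), trace_buf->recursion++, trace_buf->buf). A minimal reconstruction, with guessed field types and buffer size rather than the definition commit ce71b9 actually removed, might look like:

/*
 * Hypothetical reconstruction, inferred from usage in the sample code
 * below; not the kernel's actual former definition. One instance per
 * CPU (e.g. from alloc_percpu()), reached through per_cpu_ptr().
 */
struct perf_trace_buf {
	char	buf[8192];	/* scratch space for one event record; size is a guess */
	int	recursion;	/* non-zero while this CPU is already inside the perf path */
};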
@@ -619,79 +619,6 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-/*
- * Define the insertion callback to perf events
- *
- * The job is very similar to ftrace_raw_event_<call> except that we don't
- * insert in the ring buffer but in a perf counter.
- *
- * static void ftrace_perf_<call>(proto)
- * {
- *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
- *	struct ftrace_event_call *event_call = &event_<call>;
- *	extern void perf_tp_event(int, u64, u64, void *, int);
- *	struct ftrace_raw_##call *entry;
- *	struct perf_trace_buf *trace_buf;
- *	u64 __addr = 0, __count = 1;
- *	unsigned long irq_flags;
- *	struct trace_entry *ent;
- *	int __entry_size;
- *	int __data_size;
- *	int __cpu
- *	int pc;
- *
- *	pc = preempt_count();
- *
- *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
- *
- *	// Below we want to get the aligned size by taking into account
- *	// the u32 field that will later store the buffer size
- *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
- *			     sizeof(u64));
- *	__entry_size -= sizeof(u32);
- *
- *	// Protect the non nmi buffer
- *	// This also protects the rcu read side
- *	local_irq_save(irq_flags);
- *	__cpu = smp_processor_id();
- *
- *	if (in_nmi())
- *		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
- *	else
- *		trace_buf = rcu_dereference_sched(perf_trace_buf);
- *
- *	if (!trace_buf)
- *		goto end;
- *
- *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
- *
- *	// Avoid recursion from perf that could mess up the buffer
- *	if (trace_buf->recursion++)
- *		goto end_recursion;
- *
- *	raw_data = trace_buf->buf;
- *
- *	// Make recursion update visible before entering perf_tp_event
- *	// so that we protect from perf recursions.
- *
- *	barrier();
- *
- *	//zero dead bytes from alignment to avoid stack leak to userspace:
- *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
- *	entry = (struct ftrace_raw_<call> *)raw_data;
- *	ent = &entry->ent;
- *	tracing_generic_entry_update(ent, irq_flags, pc);
- *	ent->type = event_call->id;
- *
- *	<tstruct> <- do some jobs with dynamic arrays
- *
- *	<assign> <- affect our values
- *
- *	perf_tp_event(event_call->id, __addr, __count, entry,
- *		       __entry_size); <- submit them to perf counter
- *
- * }
- */
 
 #ifdef CONFIG_PERF_EVENTS
 
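The deleted comment documented the old submission scheme: disable interrupts, grab this CPU's buffer, detect re-entry with a per-buffer recursion counter, build the record in place, and pass it to perf_tp_event(). Commit ce71b9 replaced all of this with perf's own recursion protection, which is why the sample no longer matched any real code. Below is a condensed, illustrative sketch of that old flow only: it reuses the hypothetical struct perf_trace_buf above and the perf_tp_event() prototype quoted in the removed comment, elides the separate in_nmi() buffer, and sample_perf_emit() and its parameters are made up for illustration, not kernel API:

#include <linux/irqflags.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/string.h>

/* Prototype exactly as declared inside the removed comment. */
extern void perf_tp_event(int, u64, u64, void *, int);

/* Assumed to be allocated elsewhere with alloc_percpu(). */
static struct perf_trace_buf __rcu *perf_trace_buf;

static void sample_perf_emit(int id, const void *payload, int payload_size)
{
	struct perf_trace_buf *trace_buf;
	unsigned long irq_flags;
	char *raw_data;
	int entry_size;

	/* Same arithmetic as the removed comment: round the record up to
	 * u64 alignment counting the u32 length word, then drop the u32.
	 * E.g. a 13-byte payload gives ALIGN(17, 8) - 4 = 24 - 4 = 20. */
	entry_size = ALIGN(payload_size + sizeof(u32), sizeof(u64)) - sizeof(u32);

	/* Disabling irqs protects the buffer and the RCU read side. */
	local_irq_save(irq_flags);

	trace_buf = rcu_dereference_sched(perf_trace_buf);
	if (!trace_buf)
		goto end;
	trace_buf = per_cpu_ptr(trace_buf, smp_processor_id());

	/* Re-entry from perf itself would clobber the buffer: bail out. */
	if (trace_buf->recursion++)
		goto end_recursion;

	/* Make the recursion update visible before calling into perf. */
	barrier();

	raw_data = trace_buf->buf;
	/* Zero the tail u64 first so alignment padding cannot leak stale
	 * bytes to user space, then copy the payload over it. */
	*(u64 *)(&raw_data[entry_size - sizeof(u64)]) = 0ULL;
	memcpy(raw_data, payload, payload_size);

	perf_tp_event(id, /* __addr */ 0, /* __count */ 1, raw_data, entry_size);

end_recursion:
	trace_buf->recursion--;
end:
	local_irq_restore(irq_flags);
}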