tracing/function-graph-tracer: fix functions call traces imbalance

Impact: fix traces output

Sometimes one can observe an imbalance in the traces between function
calls and function return traces:

func1() {
    }
}

The curly brace inside func1() is the return of another function nested
inside func1. The return trace has been inserted in the buffer but not
the entry.
We are storing a return address on the function traces stack while we
haven't inserted its entry on the buffer, hence the imbalance on the
traces.

This is because the tracer doesn't check all the failures that can
happen during buffer insertion.

This patch reports the tracing recursion failures and the ring buffer
failures. In such cases, we now restore the original return address for
the function, giving up its return trace.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <1237843021-11695-1-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
Frederic Weisbecker 2009-03-23 22:17:01 +01:00 committed by Ingo Molnar
parent c4cff064be
commit 1618536961

View File

@ -924,7 +924,7 @@ trace_function(struct trace_array *tr,
} }
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void __trace_graph_entry(struct trace_array *tr, static int __trace_graph_entry(struct trace_array *tr,
struct ftrace_graph_ent *trace, struct ftrace_graph_ent *trace,
unsigned long flags, unsigned long flags,
int pc) int pc)
@ -933,15 +933,17 @@ static void __trace_graph_entry(struct trace_array *tr,
struct ftrace_graph_ent_entry *entry; struct ftrace_graph_ent_entry *entry;
if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
return; return 0;
event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT, event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
sizeof(*entry), flags, pc); sizeof(*entry), flags, pc);
if (!event) if (!event)
return; return 0;
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
entry->graph_ent = *trace; entry->graph_ent = *trace;
ring_buffer_unlock_commit(global_trace.buffer, event); ring_buffer_unlock_commit(global_trace.buffer, event);
return 1;
} }
static void __trace_graph_return(struct trace_array *tr, static void __trace_graph_return(struct trace_array *tr,
@ -1162,6 +1164,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
struct trace_array_cpu *data; struct trace_array_cpu *data;
unsigned long flags; unsigned long flags;
long disabled; long disabled;
int ret;
int cpu; int cpu;
int pc; int pc;
@ -1177,15 +1180,18 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
disabled = atomic_inc_return(&data->disabled); disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) { if (likely(disabled == 1)) {
pc = preempt_count(); pc = preempt_count();
__trace_graph_entry(tr, trace, flags, pc); ret = __trace_graph_entry(tr, trace, flags, pc);
} else {
ret = 0;
} }
/* Only do the atomic if it is not already set */ /* Only do the atomic if it is not already set */
if (!test_tsk_trace_graph(current)) if (!test_tsk_trace_graph(current))
set_tsk_trace_graph(current); set_tsk_trace_graph(current);
atomic_dec(&data->disabled); atomic_dec(&data->disabled);
local_irq_restore(flags); local_irq_restore(flags);
return 1; return ret;
} }
void trace_graph_return(struct ftrace_graph_ret *trace) void trace_graph_return(struct ftrace_graph_ret *trace)