mirror of https://github.com/torvalds/linux.git
perf/ftrace: Optimize perf/tracepoint interaction for single events
When we've got but a single event per tracepoint there is no reason to try and multiplex it so don't.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Ingo Molnar <mingo@elte.hu>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 4f41c013f5
parent ef4f30f54e
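The patch threads a void *event cookie from perf_trace_enable() into the tracepoint's new perf_data field and on into perf_tp_event(): when a tracepoint has exactly one perf event attached, the sample is handed straight to that event instead of being multiplexed through the generic software-event matching path; once a second event attaches, perf_data is reset to NULL and the old multiplexing behaviour is kept. The snippet below is a minimal userspace sketch of that dispatch decision, with hypothetical names throughout; it only models the idea and is not kernel code.

/*
 * Illustrative userspace model of the single-event fast path; every name
 * here is hypothetical and only mirrors the idea of the patch below.
 */
#include <stdio.h>
#include <stddef.h>

struct event {
	int id;
	void (*deliver)(struct event *ev, const void *record, size_t size);
};

/* Slow path: walk every registered event and match on id (multiplex). */
static void dispatch_all(struct event **events, int nr, int id,
			 const void *record, size_t size)
{
	for (int i = 0; i < nr; i++) {
		if (events[i]->id == id)
			events[i]->deliver(events[i], record, size);
	}
}

/*
 * Fast path: if the tracepoint carries a single known consumer, hand the
 * record straight to it and skip the matching walk entirely.
 */
static void tp_event(struct event *single, struct event **all, int nr,
		     int id, const void *record, size_t size)
{
	if (single) {
		single->deliver(single, record, size);
		return;
	}
	dispatch_all(all, nr, id, record, size);
}

static void print_deliver(struct event *ev, const void *record, size_t size)
{
	(void)record;
	printf("event %d received %zu bytes\n", ev->id, size);
}

int main(void)
{
	struct event a = { .id = 42, .deliver = print_deliver };
	struct event *all[] = { &a };
	char rec[16] = { 0 };

	tp_event(&a, all, 1, 42, rec, sizeof(rec));   /* single-event fast path */
	tp_event(NULL, all, 1, 42, rec, sizeof(rec)); /* generic multiplex path */
	return 0;
}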
@@ -132,6 +132,7 @@ struct ftrace_event_call {
 	void *data;

 	int perf_refcount;
+	void *perf_data;
 	int (*perf_event_enable)(struct ftrace_event_call *);
 	void (*perf_event_disable)(struct ftrace_event_call *);
 };
@@ -190,7 +191,7 @@ struct perf_event;

 DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);

-extern int perf_trace_enable(int event_id);
+extern int perf_trace_enable(int event_id, void *data);
 extern void perf_trace_disable(int event_id);
 extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
 				     char *filter_str);
@@ -201,11 +202,12 @@ perf_trace_buf_prepare(int size, unsigned short type, int *rctxp,

 static inline void
 perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
-		       u64 count, unsigned long irq_flags, struct pt_regs *regs)
+		       u64 count, unsigned long irq_flags, struct pt_regs *regs,
+		       void *event)
 {
 	struct trace_entry *entry = raw_data;

-	perf_tp_event(entry->type, addr, count, raw_data, size, regs);
+	perf_tp_event(entry->type, addr, count, raw_data, size, regs, event);
 	perf_swevent_put_recursion_context(rctx);
 	local_irq_restore(irq_flags);
 }
@@ -994,7 +994,7 @@ static inline bool perf_paranoid_kernel(void)

 extern void perf_event_init(void);
 extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
-			  int entry_size, struct pt_regs *regs);
+			  int entry_size, struct pt_regs *regs, void *event);
 extern void perf_bp_event(struct perf_event *event, void *data);

 #ifndef perf_misc_flags
@@ -785,7 +785,8 @@ perf_trace_templ_##call(struct ftrace_event_call *event_call, \
 	{ assign; } \
 \
 	perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
-			       __count, irq_flags, __regs); \
+			       __count, irq_flags, __regs, \
+			       event_call->perf_data); \
 }

 #undef DEFINE_EVENT
@@ -4468,8 +4468,9 @@ static int swevent_hlist_get(struct perf_event *event)
 #ifdef CONFIG_EVENT_TRACING

 void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
-		   int entry_size, struct pt_regs *regs)
+		   int entry_size, struct pt_regs *regs, void *event)
 {
+	const int type = PERF_TYPE_TRACEPOINT;
 	struct perf_sample_data data;
 	struct perf_raw_record raw = {
 		.size = entry_size,
@@ -4479,9 +4480,13 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
 	perf_sample_data_init(&data, addr);
 	data.raw = &raw;

-	/* Trace events already protected against recursion */
-	do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
-			 &data, regs);
+	if (!event) {
+		do_perf_sw_event(type, event_id, count, 1, &data, regs);
+		return;
+	}
+
+	if (perf_swevent_match(event, type, event_id, &data, regs))
+		perf_swevent_add(event, count, 1, &data, regs);
 }

 EXPORT_SYMBOL_GPL(perf_tp_event);
@@ -4514,7 +4519,7 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
 			!capable(CAP_SYS_ADMIN))
 		return ERR_PTR(-EPERM);

-	if (perf_trace_enable(event->attr.config))
+	if (perf_trace_enable(event->attr.config, event))
 		return NULL;

 	event->destroy = tp_perf_event_destroy;
@@ -27,13 +27,15 @@ typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
 /* Count the events in use (per event id, not per instance) */
 static int total_ref_count;

-static int perf_trace_event_enable(struct ftrace_event_call *event)
+static int perf_trace_event_enable(struct ftrace_event_call *event, void *data)
 {
 	char *buf;
 	int ret = -ENOMEM;

-	if (event->perf_refcount++ > 0)
+	if (event->perf_refcount++ > 0) {
+		event->perf_data = NULL;
 		return 0;
+	}

 	if (!total_ref_count) {
 		buf = (char *)alloc_percpu(perf_trace_t);
@@ -51,6 +53,7 @@ static int perf_trace_event_enable(struct ftrace_event_call *event)

 	ret = event->perf_event_enable(event);
 	if (!ret) {
+		event->perf_data = data;
 		total_ref_count++;
 		return 0;
 	}
@@ -68,7 +71,7 @@ fail_buf:
 	return ret;
 }

-int perf_trace_enable(int event_id)
+int perf_trace_enable(int event_id, void *data)
 {
 	struct ftrace_event_call *event;
 	int ret = -EINVAL;
@@ -77,7 +80,7 @@ int perf_trace_enable(int event_id)
 	list_for_each_entry(event, &ftrace_events, list) {
 		if (event->id == event_id && event->perf_event_enable &&
 		    try_module_get(event->mod)) {
-			ret = perf_trace_event_enable(event);
+			ret = perf_trace_event_enable(event, data);
 			break;
 		}
 	}
@@ -1362,7 +1362,7 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
 	for (i = 0; i < tp->nr_args; i++)
 		call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset);

-	perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
+	perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs, call->perf_data);
 }

 /* Kretprobe profile handler */
@@ -1395,7 +1395,7 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
 		call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset);

 	perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1,
-			       irq_flags, regs);
+			       irq_flags, regs, call->perf_data);
 }

 static int probe_perf_enable(struct ftrace_event_call *call)
@@ -468,7 +468,8 @@ static void perf_syscall_enter(struct pt_regs *regs, long id)
 	rec->nr = syscall_nr;
 	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
 			       (unsigned long *)&rec->args);
-	perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
+	perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs,
+			      sys_data->enter_event->perf_data);
 }

 int perf_sysenter_enable(struct ftrace_event_call *call)
@@ -543,7 +544,8 @@ static void perf_syscall_exit(struct pt_regs *regs, long ret)
 	rec->nr = syscall_nr;
 	rec->ret = syscall_get_return_value(current, regs);

-	perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
+	perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs,
+			      sys_data->exit_event->perf_data);
 }

 int perf_sysexit_enable(struct ftrace_event_call *call)