Mirror of https://github.com/torvalds/linux.git (synced 2024-11-08 13:11:45 +00:00)
commit d077526485

Introducing the following bits to the perf_event_attr struct:

 - exclude_callchain_kernel to filter out the kernel callchain from the
   sample dump

 - exclude_callchain_user to filter out the user callchain from the
   sample dump

We need to be able to disable the standard user callchain dump when we
use the dwarf cfi callchain mode, because frame pointer based user
callchains are useless in this mode.

Implementing also exclude_callchain_kernel to have a complete set of
options.

Signed-off-by: Jiri Olsa <jolsa@redhat.com>
[ Added kernel callchains filtering ]
Cc: "Frank Ch. Eigler" <fche@redhat.com>
Cc: Arun Sharma <asharma@fb.com>
Cc: Benjamin Redelings <benjamin.redelings@nescent.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
Cc: Ulrich Drepper <drepper@gmail.com>
Link: http://lkml.kernel.org/r/1344345647-11536-7-git-send-email-jolsa@redhat.com
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
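As context for the new attribute bits, here is a minimal user-space sketch of opening a sampling event with the user part of the callchain suppressed. The perf_event_open() wrapper, the event choice and the sample period are illustrative assumptions, not part of this commit:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/* Hypothetical wrapper: glibc ships no perf_event_open() stub. */
static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

/*
 * Open a self-profiling cycles event whose samples carry only the
 * kernel side of the callchain.
 */
static int open_kernel_only_callchain_event(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_CALLCHAIN;

	/*
	 * New bit from this commit: suppress the user callchain, e.g.
	 * because user unwinding is done separately via dwarf cfi.
	 */
	attr.exclude_callchain_user = 1;

	return perf_event_open(&attr, 0 /* self */, -1 /* any cpu */, -1, 0);
}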
207 lines
4.3 KiB
C
/*
 * Performance events callchain code, extracted from core.c:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include "internal.h"

struct callchain_cpus_entries {
	struct rcu_head			rcu_head;
	struct perf_callchain_entry	*cpu_entries[0];	/* one slot per possible cpu */
};

/*
 * Per-cpu recursion guard: one counter per context level
 * (task, softirq, hardirq, NMI).
 */
static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;

/*
 * Default no-op stubs; architectures that support callchain sampling
 * override these with their real stack walkers.
 */
__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
				  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry *entry,
				struct pt_regs *regs)
{
}

static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

	kfree(entries);
}

static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
	rcu_assign_pointer(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}

static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}

/*
 * The first event to request callchain buffers allocates them for all
 * cpus; subsequent callers only bump the refcount and check that the
 * allocation succeeded.
 */
int get_callchain_buffers(void)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	if (count > 1) {
		/* If the allocation failed, give up */
		if (!callchain_cpus_entries)
			err = -ENOMEM;

		goto exit;
	}

	err = alloc_callchain_buffers();
exit:
	mutex_unlock(&callchain_mutex);

	return err;
}

void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}

static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries)
		return NULL;

	cpu = smp_processor_id();

	return &entries->cpu_entries[cpu][*rctx];
}

static void
put_callchain_entry(int rctx)
{
	put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
}

struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs)
{
	int rctx;
	struct perf_callchain_entry *entry;

	int kernel = !event->attr.exclude_callchain_kernel;
	int user   = !event->attr.exclude_callchain_user;

	if (!kernel && !user)
		return NULL;

	entry = get_callchain_entry(&rctx);
	if (rctx == -1)
		return NULL;

	if (!entry)
		goto exit_put;

	entry->nr = 0;

	if (kernel && !user_mode(regs)) {
		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(entry, regs);
	}

	if (user) {
		if (!user_mode(regs)) {
			if (current->mm)
				regs = task_pt_regs(current);
			else
				regs = NULL;
		}

		if (regs) {
			/*
			 * Disallow cross-task user callchains.
			 */
			if (event->ctx->task && event->ctx->task != current)
				goto exit_put;

			perf_callchain_store(entry, PERF_CONTEXT_USER);
			perf_callchain_user(entry, regs);
		}
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}
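To show how the PERF_CONTEXT_KERNEL / PERF_CONTEXT_USER markers that perf_callchain() stores above reach user space, here is a hedged consumer-side sketch that splits a raw PERF_SAMPLE_CALLCHAIN array on those markers. The helper name and output format are illustrative, not perf's actual code:

#include <linux/perf_event.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical helper: walk the ips[] array of a PERF_SAMPLE_CALLCHAIN
 * record. Context markers are huge values (>= PERF_CONTEXT_MAX, which
 * is (u64)-4095), so they cannot collide with real addresses.
 */
static void print_callchain(const uint64_t *ips, uint64_t nr)
{
	const char *ctx = "unknown";
	uint64_t i;

	for (i = 0; i < nr; i++) {
		if (ips[i] >= PERF_CONTEXT_MAX) {
			/* A context marker, not a real address. */
			if (ips[i] == PERF_CONTEXT_KERNEL)
				ctx = "kernel";
			else if (ips[i] == PERF_CONTEXT_USER)
				ctx = "user";
			else
				ctx = "other";
			continue;
		}
		printf("  [%s] %#" PRIx64 "\n", ctx, ips[i]);
	}
}

With exclude_callchain_user set, the PERF_CONTEXT_USER part simply never appears in the array, which is exactly the effect the commit above introduces.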