0429fbc0bd

Pull percpu consistent-ops changes from Tejun Heo:
 "Way back, before the current percpu allocator was implemented, static
  and dynamic percpu memory areas were allocated and handled separately
  and had their own accessors. The distinction has been gone for many
  years now; however, the now duplicate two sets of accessors remained,
  with the pointer-based ones - this_cpu_*() - evolving various other
  operations over time. During the process, we also accumulated other
  inconsistent operations.

  This pull request contains Christoph's patches to clean up the
  duplicate accessor situation. __get_cpu_var() uses are replaced with
  this_cpu_ptr() and __this_cpu_ptr() with raw_cpu_ptr(). Unfortunately,
  the former sometimes is tricky thanks to C being a bit messy with the
  distinction between lvalues and pointers, which led to a rather ugly
  solution for cpumask_var_t involving the introduction of
  this_cpu_cpumask_var_ptr().

  This converts most of the uses but not all. Christoph will follow up
  with the remaining conversions in this merge window and hopefully
  remove the obsolete accessors"

* 'for-3.18-consistent-ops' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (38 commits)
  irqchip: Properly fetch the per cpu offset
  percpu: Resolve ambiguities in __get_cpu_var/cpumask_var_t -fix
  ia64: sn_nodepda cannot be assigned to after this_cpu conversion. Use __this_cpu_write.
  percpu: Resolve ambiguities in __get_cpu_var/cpumask_var_t
  Revert "powerpc: Replace __get_cpu_var uses"
  percpu: Remove __this_cpu_ptr
  clocksource: Replace __this_cpu_ptr with raw_cpu_ptr
  sparc: Replace __get_cpu_var uses
  avr32: Replace __get_cpu_var with __this_cpu_write
  blackfin: Replace __get_cpu_var uses
  tile: Use this_cpu_ptr() for hardware counters
  tile: Replace __get_cpu_var uses
  powerpc: Replace __get_cpu_var uses
  alpha: Replace __get_cpu_var
  ia64: Replace __get_cpu_var uses
  s390: cio driver &__get_cpu_var replacements
  s390: Replace __get_cpu_var uses
  mips: Replace __get_cpu_var uses
  MIPS: Replace __get_cpu_var uses in FPU emulator.
  arm: Replace __this_cpu_ptr with raw_cpu_ptr
  ...
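The accessor conversion described in the pull message is mostly mechanical. The snippet below is a minimal sketch of the before/after pattern, not taken from this file; the per-CPU variable my_counter and the helper touch_counter() are hypothetical names used only for illustration.

#include <linux/percpu.h>

/* Hypothetical per-CPU counter, used only to illustrate the conversion. */
static DEFINE_PER_CPU(int, my_counter);

static void touch_counter(void)	/* hypothetical helper */
{
	int *p, *q;

	/*
	 * Old style (being removed):
	 *	__get_cpu_var(my_counter)++;
	 *	p = &__get_cpu_var(my_counter);
	 *	q = __this_cpu_ptr(&my_counter);
	 */

	this_cpu_inc(my_counter);	/* operate on this CPU's copy directly */
	p = this_cpu_ptr(&my_counter);	/* replaces &__get_cpu_var()           */
	q = raw_cpu_ptr(&my_counter);	/* replaces __this_cpu_ptr()           */

	(void)p;
	(void)q;
}

In the file below, the per-cpu recursion counters are already accessed through this_cpu_ptr() in get_callchain_entry() and put_callchain_entry(), so no __get_cpu_var() conversion was needed here.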
210 lines | 4.3 KiB | C
/*
 * Performance events callchain code, extracted from core.c:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include "internal.h"

struct callchain_cpus_entries {
	struct rcu_head			rcu_head;
	struct perf_callchain_entry	*cpu_entries[0];
};

static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;
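
/*
 * Architecture hooks: these weak stubs are overridden by architectures
 * that implement kernel/user stack unwinding for perf.
 */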
__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
				  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry *entry,
				struct pt_regs *regs)
{
}
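
/*
 * Teardown is RCU-deferred: the global pointer is cleared first, then
 * the per-cpu entries are freed once all readers have finished.
 */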
static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

	kfree(entries);
}

static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
	RCU_INIT_POINTER(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}

static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}
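
/*
 * Take a reference on the shared callchain buffers; the first caller
 * allocates them, later callers only bump nr_callchain_events.
 */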
int get_callchain_buffers(void)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	if (count > 1) {
		/* If the allocation failed, give up */
		if (!callchain_cpus_entries)
			err = -ENOMEM;
		goto exit;
	}

	err = alloc_callchain_buffers();
exit:
	if (err)
		atomic_dec(&nr_callchain_events);

	mutex_unlock(&callchain_mutex);

	return err;
}

void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}
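
/*
 * Claim this CPU's callchain slot for the current recursion context
 * (task, softirq, hardirq or NMI). Returns NULL if the context is
 * already busy (*rctx == -1) or if the buffers are not allocated.
 */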
static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries)
		return NULL;

	cpu = smp_processor_id();

	return &entries->cpu_entries[cpu][*rctx];
}

static void
put_callchain_entry(int rctx)
{
	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}
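
/*
 * Build the callchain for @event at the point described by @regs,
 * capturing kernel and/or user frames according to the event's
 * exclude_callchain_* attributes.
 */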
struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs)
{
	int rctx;
	struct perf_callchain_entry *entry;

	int kernel = !event->attr.exclude_callchain_kernel;
	int user   = !event->attr.exclude_callchain_user;

	if (!kernel && !user)
		return NULL;

	entry = get_callchain_entry(&rctx);
	if (rctx == -1)
		return NULL;

	if (!entry)
		goto exit_put;

	entry->nr = 0;

	if (kernel && !user_mode(regs)) {
		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(entry, regs);
	}

	if (user) {
		if (!user_mode(regs)) {
			if (current->mm)
				regs = task_pt_regs(current);
			else
				regs = NULL;
		}

		if (regs) {
			/*
			 * Disallow cross-task user callchains.
			 */
			if (event->ctx->task && event->ctx->task != current)
				goto exit_put;

			perf_callchain_store(entry, PERF_CONTEXT_USER);
			perf_callchain_user(entry, regs);
		}
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}