commit 4dc0da8696

The perf_event overflow handler does not receive any caller-derived
argument, so many callers need to resort to looking up the perf_event
in their local data structure. This is ugly and doesn't scale if a
single callback services many perf_events.

Fix by adding a context parameter to perf_event_create_kernel_counter()
(and derived hardware breakpoints APIs) and storing it in the
perf_event. The field can be accessed from the callback as
event->overflow_handler_context. All callers are updated.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1309362157-6596-2-git-send-email-avi@redhat.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
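As a rough illustration of the API change described above, the sketch below shows how a kernel-side caller might hand its own context pointer to perf_event_create_kernel_counter() and read it back in the overflow callback through event->overflow_handler_context. The struct my_ctx, my_handler() and create_hits_counter() names are made up for this example; only the perf_event_create_kernel_counter() signature and the overflow_handler_context field come from the patch itself.

#include <linux/atomic.h>
#include <linux/perf_event.h>

/* Illustrative caller-side context object; not part of the patch. */
struct my_ctx {
	atomic_t hits;
};

static void my_handler(struct perf_event *event,
		       struct perf_sample_data *data, struct pt_regs *regs)
{
	/* The pointer passed at creation time is stored in the event. */
	struct my_ctx *ctx = event->overflow_handler_context;

	atomic_inc(&ctx->hits);
}

static struct perf_event *create_hits_counter(struct perf_event_attr *attr,
					      int cpu, struct my_ctx *ctx)
{
	/* New trailing argument: the caller-supplied context pointer. */
	return perf_event_create_kernel_counter(attr, cpu, NULL,
						my_handler, ctx);
}

With this, a single handler can service many counters without scanning a private table to find out which event fired, which is exactly the lookup the oprofile backend below still performs in op_overflow_handler().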
/*
 * Copyright 2010 ARM Ltd.
 *
 * Perf-events backend for OProfile.
 */
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/oprofile.h>
#include <linux/slab.h>

/*
 * Per performance monitor configuration as set via oprofilefs.
 */
struct op_counter_config {
	unsigned long count;
	unsigned long enabled;
	unsigned long event;
	unsigned long unit_mask;
	unsigned long kernel;
	unsigned long user;
	struct perf_event_attr attr;
};

static int oprofile_perf_enabled;
static DEFINE_MUTEX(oprofile_perf_mutex);

static struct op_counter_config *counter_config;
static struct perf_event **perf_events[nr_cpumask_bits];
static int num_counters;

/*
 * Overflow callback for oprofile.
 */
static void op_overflow_handler(struct perf_event *event,
			struct perf_sample_data *data, struct pt_regs *regs)
{
	int id;
	u32 cpu = smp_processor_id();

	for (id = 0; id < num_counters; ++id)
		if (perf_events[cpu][id] == event)
			break;

	if (id != num_counters)
		oprofile_add_sample(regs, id);
	else
		pr_warning("oprofile: ignoring spurious overflow "
				"on cpu %u\n", cpu);
}

/*
 * Called by oprofile_perf_setup to create perf attributes to mirror the oprofile
 * settings in counter_config. Attributes are created as `pinned' events and
 * so are permanently scheduled on the PMU.
 */
static void op_perf_setup(void)
{
	int i;
	u32 size = sizeof(struct perf_event_attr);
	struct perf_event_attr *attr;

	for (i = 0; i < num_counters; ++i) {
		attr = &counter_config[i].attr;
		memset(attr, 0, size);
		attr->type = PERF_TYPE_RAW;
		attr->size = size;
		attr->config = counter_config[i].event;
		attr->sample_period = counter_config[i].count;
		attr->pinned = 1;
	}
}

static int op_create_counter(int cpu, int event)
{
	struct perf_event *pevent;

	if (!counter_config[event].enabled || perf_events[cpu][event])
		return 0;

	pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
						  cpu, NULL,
						  op_overflow_handler, NULL);

	if (IS_ERR(pevent))
		return PTR_ERR(pevent);

	if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
		perf_event_release_kernel(pevent);
		pr_warning("oprofile: failed to enable event %d "
				"on CPU %d\n", event, cpu);
		return -EBUSY;
	}

	perf_events[cpu][event] = pevent;

	return 0;
}

static void op_destroy_counter(int cpu, int event)
{
	struct perf_event *pevent = perf_events[cpu][event];

	if (pevent) {
		perf_event_release_kernel(pevent);
		perf_events[cpu][event] = NULL;
	}
}

/*
 * Called by oprofile_perf_start to create active perf events based on the
 * previously configured attributes.
 */
static int op_perf_start(void)
{
	int cpu, event, ret = 0;

	for_each_online_cpu(cpu) {
		for (event = 0; event < num_counters; ++event) {
			ret = op_create_counter(cpu, event);
			if (ret)
				return ret;
		}
	}

	return ret;
}

/*
 * Called by oprofile_perf_stop at the end of a profiling run.
 */
static void op_perf_stop(void)
{
	int cpu, event;

	for_each_online_cpu(cpu)
		for (event = 0; event < num_counters; ++event)
			op_destroy_counter(cpu, event);
}

static int oprofile_perf_create_files(struct super_block *sb, struct dentry *root)
{
	unsigned int i;

	for (i = 0; i < num_counters; i++) {
		struct dentry *dir;
		char buf[4];

		snprintf(buf, sizeof buf, "%d", i);
		dir = oprofilefs_mkdir(sb, root, buf);
		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
	}

	return 0;
}

static int oprofile_perf_setup(void)
{
	spin_lock(&oprofilefs_lock);
	op_perf_setup();
	spin_unlock(&oprofilefs_lock);
	return 0;
}

static int oprofile_perf_start(void)
{
	int ret = -EBUSY;

	mutex_lock(&oprofile_perf_mutex);
	if (!oprofile_perf_enabled) {
		ret = 0;
		op_perf_start();
		oprofile_perf_enabled = 1;
	}
	mutex_unlock(&oprofile_perf_mutex);
	return ret;
}

static void oprofile_perf_stop(void)
{
	mutex_lock(&oprofile_perf_mutex);
	if (oprofile_perf_enabled)
		op_perf_stop();
	oprofile_perf_enabled = 0;
	mutex_unlock(&oprofile_perf_mutex);
}

#ifdef CONFIG_PM

static int oprofile_perf_suspend(struct platform_device *dev, pm_message_t state)
{
	mutex_lock(&oprofile_perf_mutex);
	if (oprofile_perf_enabled)
		op_perf_stop();
	mutex_unlock(&oprofile_perf_mutex);
	return 0;
}

static int oprofile_perf_resume(struct platform_device *dev)
{
	mutex_lock(&oprofile_perf_mutex);
	if (oprofile_perf_enabled && op_perf_start())
		oprofile_perf_enabled = 0;
	mutex_unlock(&oprofile_perf_mutex);
	return 0;
}

static struct platform_driver oprofile_driver = {
	.driver		= {
		.name	= "oprofile-perf",
	},
	.resume		= oprofile_perf_resume,
	.suspend	= oprofile_perf_suspend,
};

static struct platform_device *oprofile_pdev;

static int __init init_driverfs(void)
{
	int ret;

	ret = platform_driver_register(&oprofile_driver);
	if (ret)
		return ret;

	oprofile_pdev = platform_device_register_simple(
				oprofile_driver.driver.name, 0, NULL, 0);
	if (IS_ERR(oprofile_pdev)) {
		ret = PTR_ERR(oprofile_pdev);
		platform_driver_unregister(&oprofile_driver);
	}

	return ret;
}

static void exit_driverfs(void)
{
	platform_device_unregister(oprofile_pdev);
	platform_driver_unregister(&oprofile_driver);
}

#else

static inline int init_driverfs(void) { return 0; }
static inline void exit_driverfs(void) { }

#endif /* CONFIG_PM */

void oprofile_perf_exit(void)
{
	int cpu, id;
	struct perf_event *event;

	for_each_possible_cpu(cpu) {
		for (id = 0; id < num_counters; ++id) {
			event = perf_events[cpu][id];
			if (event)
				perf_event_release_kernel(event);
		}

		kfree(perf_events[cpu]);
	}

	kfree(counter_config);
	exit_driverfs();
}

int __init oprofile_perf_init(struct oprofile_operations *ops)
{
	int cpu, ret = 0;

	ret = init_driverfs();
	if (ret)
		return ret;

	memset(&perf_events, 0, sizeof(perf_events));

	num_counters = perf_num_counters();
	if (num_counters <= 0) {
		pr_info("oprofile: no performance counters\n");
		ret = -ENODEV;
		goto out;
	}

	counter_config = kcalloc(num_counters,
			sizeof(struct op_counter_config), GFP_KERNEL);

	if (!counter_config) {
		pr_info("oprofile: failed to allocate %d "
				"counters\n", num_counters);
		ret = -ENOMEM;
		num_counters = 0;
		goto out;
	}

	for_each_possible_cpu(cpu) {
		perf_events[cpu] = kcalloc(num_counters,
				sizeof(struct perf_event *), GFP_KERNEL);
		if (!perf_events[cpu]) {
			pr_info("oprofile: failed to allocate %d perf events "
					"for cpu %d\n", num_counters, cpu);
			ret = -ENOMEM;
			goto out;
		}
	}

	ops->create_files	= oprofile_perf_create_files;
	ops->setup		= oprofile_perf_setup;
	ops->start		= oprofile_perf_start;
	ops->stop		= oprofile_perf_stop;
	ops->shutdown		= oprofile_perf_stop;
	ops->cpu_type		= op_name_from_perf_id();

	if (!ops->cpu_type)
		ret = -ENODEV;
	else
		pr_info("oprofile: using %s\n", ops->cpu_type);

out:
	if (ret)
		oprofile_perf_exit();

	return ret;
}