/*
 * Performance counter core code
 *
 * Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_counter.h>

perf_counters: make software counters work as per-cpu counters
Impact: kernel crash fix
Yanmin Zhang reported that using a PERF_COUNT_TASK_CLOCK software
counter as a per-cpu counter would reliably crash the system, because
it calls __task_delta_exec with a null pointer. The page fault,
context switch and cpu migration counters also won't function
correctly as per-cpu counters since they reference the current task.
This fixes the problem by redirecting the task_clock counter to the
cpu_clock counter when used as a per-cpu counter, and by implementing
per-cpu page fault, context switch and cpu migration counters.
Along the way, this:
- Initializes counter->ctx earlier, in perf_counter_alloc, so that
sw_perf_counter_init can use it
- Adds code to kernel/sched.c to count task migrations into each
cpu, in rq->nr_migrations_in
- Exports the per-cpu context switch and task migration counts
via new functions added to kernel/sched.c
- Makes sure that if sw_perf_counter_init fails, we don't try to
initialize the counter as a hardware counter. Since the user has
passed a negative, non-raw event type, they clearly don't intend
for it to be interpreted as a hardware event.
Reported-by: "Zhang Yanmin" <yanmin_zhang@linux.intel.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
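
The per-cpu case described above is requested from userspace by binding the counter to a CPU instead of a task. A minimal hedged sketch follows; only the PERF_COUNT_TASK_CLOCK software counter and the idea of a per-cpu software counter come from the message above, while the syscall number, the perf_counter_hw_event layout and the pid/cpu convention are assumptions about this era's ABI.

/*
 * Hedged userspace sketch: open a task-clock software counter bound to
 * one CPU rather than to a task.  Everything except the
 * PERF_COUNT_TASK_CLOCK name is an assumption about this era's ABI
 * (syscall number, perf_counter_hw_event layout, pid/cpu convention).
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>		/* assumed userspace header */

static int open_task_clock_on_cpu(int cpu)
{
	struct perf_counter_hw_event hw_event;

	memset(&hw_event, 0, sizeof(hw_event));
	hw_event.type = PERF_COUNT_TASK_CLOCK;	/* software counters use negative types */

	/* pid == -1, cpu >= 0: count on this CPU for all tasks (assumed convention). */
	return syscall(__NR_perf_counter_open, &hw_event, -1, cpu, -1, 0);
}
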

#include <linux/mm.h>
#include <linux/vmstat.h>

/*
 * Each CPU has a list of per CPU counters:
 */
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_counters __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

/*
 * Mutex for (sysadmin-configurable) counter reservations:
 */
static DEFINE_MUTEX(perf_resource_mutex);

/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}

u64 __weak hw_perf_save_disable(void)		{ return 0; }
void __weak hw_perf_restore(u64 ctrl)		{ barrier(); }
void __weak hw_perf_counter_setup(int cpu)	{ barrier(); }

int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu)
{
	return 0;
}

void __weak perf_counter_print_debug(void)	{ }

static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *group_leader = counter->group_leader;

	/*
	 * Depending on whether it is a standalone or sibling counter,
	 * add it straight to the context's counter list, or to the group
	 * leader's sibling list:
	 */
	if (counter->group_leader == counter)
		list_add_tail(&counter->list_entry, &ctx->counter_list);
	else
		list_add_tail(&counter->list_entry, &group_leader->sibling_list);
}

static void
list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *sibling, *tmp;

	list_del_init(&counter->list_entry);

	/*
	 * If this was a group counter with sibling counters then
	 * upgrade the siblings to singleton counters by adding them
	 * to the context list directly:
	 */
	list_for_each_entry_safe(sibling, tmp,
				 &counter->sibling_list, list_entry) {

		list_del_init(&sibling->list_entry);
		list_add_tail(&sibling->list_entry, &ctx->counter_list);
		sibling->group_leader = sibling;
	}
}

perf_counter: Add support for pinned and exclusive counter groups
Impact: New perf_counter features
A pinned counter group is one that the user wants to have on the CPU
whenever possible, i.e. whenever the associated task is running, for
a per-task group, or always for a per-cpu group. If the system
cannot satisfy that, it puts the group into an error state where
it is not scheduled any more and reads from it return EOF (i.e. 0
bytes read). The group can be released from error state and made
readable again using prctl(PR_TASK_PERF_COUNTERS_ENABLE). When we
have finer-grained enable/disable controls on counters we'll be able
to reset the error state on individual groups.
An exclusive group is one that the user wants to be the only group
using the CPU performance monitor hardware whenever it is on. The
counter group scheduler will not schedule an exclusive group if there
are already other groups on the CPU and will not schedule other groups
onto the CPU if there is an exclusive group scheduled (that statement
does not apply to groups containing only software counters, which can
always go on and which do not prevent an exclusive group from going on).
With an exclusive group, we will be able to let users program PMU
registers at a low level without the concern that those settings will
perturb other measurements.
Along the way this reorganizes things a little:
- is_software_counter() is moved to perf_counter.h.
- cpuctx->active_oncpu now records the number of hardware counters on
the CPU, i.e. it now excludes software counters. Nothing was reading
cpuctx->active_oncpu before, so this change is harmless.
- A new cpuctx->exclusive field records whether we currently have an
exclusive group on the CPU.
- counter_sched_out moves higher up in perf_counter.c and gets called
from __perf_counter_remove_from_context and __perf_counter_exit_task,
where we used to have essentially the same code.
- __perf_counter_sched_in now goes through the counter list twice, doing
the pinned counters in the first loop and the non-pinned counters in
the second loop, in order to give the pinned counters the best chance
to be scheduled in.
Note that only a group leader can be exclusive or pinned, and that
attribute applies to the whole group. This avoids some awkwardness in
some corner cases (e.g. where a group leader is closed and the other
group members get added to the context list). If we want to relax that
restriction later, we can, and it is easier to relax a restriction than
to apply a new one.
This doesn't yet handle the case where a pinned counter is inherited
and goes into error state in the child - the error state is not
propagated up to the parent when the child exits, and arguably it
should.
Signed-off-by: Paul Mackerras <paulus@samba.org>
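
The error state of a pinned group described above is visible to userspace as a zero-length read. Here is a hedged sketch of the read path; the counter file descriptor is assumed to come from this era's counter-open syscall, and only the EOF semantics and the PR_TASK_PERF_COUNTERS_ENABLE prctl are taken from the message above.

/*
 * Hedged sketch: read a pinned group leader's count and recover from
 * error state.  read() returning 0 (EOF) means the pinned group could
 * not be kept on the PMU; PR_TASK_PERF_COUNTERS_ENABLE (assumed to be
 * exported by this tree's linux/prctl.h) releases it again.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <linux/prctl.h>	/* assumed: PR_TASK_PERF_COUNTERS_ENABLE */

static int read_pinned_leader(int counter_fd)
{
	unsigned long long count;
	ssize_t n = read(counter_fd, &count, sizeof(count));

	if (n == 0) {
		/* Group went into error state: re-enable and report it. */
		prctl(PR_TASK_PERF_COUNTERS_ENABLE, 0, 0, 0, 0);
		return -1;
	}
	if (n == (ssize_t)sizeof(count))
		printf("count: %llu\n", count);
	return 0;
}
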

static void
counter_sched_out(struct perf_counter *counter,
		  struct perf_cpu_context *cpuctx,
		  struct perf_counter_context *ctx)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->hw_ops->disable(counter);
	counter->oncpu = -1;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features
This primarily adds a way for perf_counter users to enable and disable
counters and groups. Enabling or disabling a counter or group also
enables or disables all of the child counters that have been cloned
from it to monitor children of the task monitored by the top-level
counter. The userspace interface to enable/disable counters is via
ioctl on the counter file descriptor.
Along the way this extends the code that handles child counters to
handle child counter groups properly. A group with multiple counters
will be cloned to child tasks if and only if the group leader has the
hw_event.inherit bit set - if it is set the whole group is cloned as a
group in the child task.
In order to be able to enable or disable all child counters of a given
top-level counter, we need a way to find them all. Hence I have added
a child_list field to struct perf_counter, which is the head of the
list of children for a top-level counter, or the link in that list for
a child counter. That list is protected by the perf_counter.mutex
field.
This also adds a mutex to the perf_counter_context struct. Previously
the list of counters was protected just by the lock field in the
context, which meant that perf_counter_init_task had to take that lock
and then take whatever lock/mutex protects the top-level counter's
child_list. But the counter enable/disable functions need to take
that lock in order to traverse the list, then for each counter take
the lock in that counter's context in order to change the counter's
state safely, which will lead to a deadlock.
To solve this, we now have both a mutex and a spinlock in the context,
and taking either is sufficient to ensure the list of counters can't
change - you have to take both before changing the list. Now
perf_counter_init_task takes the mutex instead of the lock (which
incidentally means that inherit_counter can use GFP_KERNEL instead of
GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new
enable/disable functions can take the mutex while traversing the list
of child counters without incurring a possible deadlock when the
counter manipulation code locks the context for a child counter.
We also had a misfeature that the first counter added to a context
would possibly not go on until the next sched-in, because we were
using ctx->nr_active to detect if the context was running on a CPU.
But nr_active is the number of active counters, and if that was zero
(because the context didn't have any counters yet) it would look like
the context wasn't running on a cpu and so the retry code in
__perf_install_in_context wouldn't retry. So this adds an 'is_active'
field that is set when the context is on a CPU, even if it has no
counters. The is_active field is only used for task contexts, not for
per-cpu contexts.
If we enable a subsidiary counter in a group that is active on a CPU,
and the arch code can't enable the counter, then we have to pull the
whole group off the CPU. We do this with group_sched_out, which gets
moved up in the file so it comes before all its callers. This also
adds similar logic to __perf_install_in_context so that the "all on,
or none" invariant of groups is preserved when adding a new counter to
a group.
Signed-off-by: Paul Mackerras <paulus@samba.org>
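
Given the ioctl interface described above, pausing and resuming a counter (and, through the child_list, its cloned children) from userspace looks roughly like the sketch below. The PERF_COUNTER_IOC_* request names are an assumption about this tree's linux/perf_counter.h; the message only states that the operation is an ioctl on the counter file descriptor.

/*
 * Hedged sketch of the enable/disable ioctls.  The request names are
 * assumed; only "ioctl on the counter fd, propagating to cloned child
 * counters" comes from the commit message above.
 */
#include <sys/ioctl.h>
#include <linux/perf_counter.h>		/* assumed to define PERF_COUNTER_IOC_* */

static void counter_pause(int counter_fd)
{
	ioctl(counter_fd, PERF_COUNTER_IOC_DISABLE, 0);
}

static void counter_resume(int counter_fd)
{
	ioctl(counter_fd, PERF_COUNTER_IOC_ENABLE, 0);
}
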

static void
group_sched_out(struct perf_counter *group_counter,
		struct perf_cpu_context *cpuctx,
		struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter_sched_out(group_counter, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
		counter_sched_out(counter, cpuctx, ctx);

	if (group_counter->hw_event.exclusive)
		cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance counter
 *
 * We disable the counter on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_counter_remove_from_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	unsigned long flags;
	u64 perf_flags;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	curr_rq_lock_irq_save(&flags);
	spin_lock(&ctx->lock);

	counter_sched_out(counter, cpuctx, ctx);

	counter->task = NULL;
	ctx->nr_counters--;

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
	perf_flags = hw_perf_save_disable();
	list_del_counter(counter, ctx);
	hw_perf_restore(perf_flags);

	if (!ctx->task) {
		/*
		 * Allow more per task counters with respect to the
		 * reservation:
		 */
		cpuctx->max_pertask =
			min(perf_max_counters - ctx->nr_counters,
			    perf_max_counters - perf_reserved_percpu);
	}

	spin_unlock(&ctx->lock);
	curr_rq_unlock_irq_restore(&flags);
}

/*
 * Remove the counter from a task's (or a CPU's) list of counters.
 *
 * Must be called with counter->mutex and ctx->mutex held.
 *
 * CPU counters are removed with a smp call. For task counters we only
 * call when the task is on a CPU.
 */
static void perf_counter_remove_from_context(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(counter->cpu,
					 __perf_counter_remove_from_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_remove_from_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can remove the counter safely if the call above did not
	 * succeed.
	 */
	if (!list_empty(&counter->list_entry)) {
		ctx->nr_counters--;
		list_del_counter(counter, ctx);
		counter->task = NULL;
	}
	spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to disable a performance counter
 */
static void __perf_counter_disable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;
	unsigned long flags;

	/*
	 * If this is a per-task counter, we need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	curr_rq_lock_irq_save(&flags);
	spin_lock(&ctx->lock);

	/*
	 * If the counter is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
		if (counter == counter->group_leader)
			group_sched_out(counter, cpuctx, ctx);
		else
			counter_sched_out(counter, cpuctx, ctx);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock(&ctx->lock);
	curr_rq_unlock_irq_restore(&flags);
}

/*
 * Disable a counter.
 */
static void perf_counter_disable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_disable,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_disable, counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the counter is still active, we need to retry the cross-call.
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_INACTIVE)
		counter->state = PERF_COUNTER_STATE_OFF;

	spin_unlock_irq(&ctx->lock);
}

/*
 * Disable a counter and all its children.
 */
static void perf_counter_disable_family(struct perf_counter *counter)
{
	struct perf_counter *child;

	perf_counter_disable(counter);

	/*
	 * Lock the mutex to protect the list of children
	 */
	mutex_lock(&counter->mutex);
	list_for_each_entry(child, &counter->child_list, child_list)
		perf_counter_disable(child);
	mutex_unlock(&counter->mutex);
}

static int
counter_sched_in(struct perf_counter *counter,
		 struct perf_cpu_context *cpuctx,
		 struct perf_counter_context *ctx,
		 int cpu)
{
	if (counter->state <= PERF_COUNTER_STATE_OFF)
		return 0;

	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (counter->hw_ops->enable(counter)) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->oncpu = -1;
		return -EAGAIN;
	}

	if (!is_software_counter(counter))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (counter->hw_event.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

/*
 * Return 1 for a group consisting entirely of software counters,
 * 0 if the group contains any hardware counters.
 */
static int is_software_only_group(struct perf_counter *leader)
{
	struct perf_counter *counter;

	if (!is_software_counter(leader))
		return 0;
	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		if (!is_software_counter(counter))
			return 0;
	return 1;
}

/*
 * Work out whether we can put this counter group on the CPU now.
 */
static int group_can_go_on(struct perf_counter *counter,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software counters can always go on.
	 */
	if (is_software_only_group(counter))
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * counters can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * counters on the CPU, it can't go on.
	 */
	if (counter->hw_event.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

/*
 * Cross CPU call to install and enable a performance counter
 */
static void __perf_install_in_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int cpu = smp_processor_id();
	unsigned long flags;
	u64 perf_flags;
	int err;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	curr_rq_lock_irq_save(&flags);
	spin_lock(&ctx->lock);

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
	perf_flags = hw_perf_save_disable();

	list_add_counter(counter, ctx);
	ctx->nr_counters++;

perfcounters: make context switch and migration software counters work again
Jaswinder Singh Rajput reported that commit 23a185ca8abbeef caused the
context switch and migration software counters to report zero always.
With that commit, the software counters only count events that occur
between sched-in and sched-out for a task. This is necessary for the
counter enable/disable prctls and ioctls to work. However, the
context switch and migration counts are incremented after sched-out
for one task and before sched-in for the next. Since the increment
doesn't occur while a task is scheduled in (as far as the software
counters are concerned) it doesn't count towards any counter.
Thus the context switch and migration counters need to count events
that occur at any time, provided the counter is enabled, not just
those that occur while the task is scheduled in (from the perf_counter
subsystem's point of view). The problem though is that the software
counter code can't tell the difference between being enabled and being
scheduled in, and between being disabled and being scheduled out,
since we use the one pair of enable/disable entry points for both.
That is, the high-level disable operation simply arranges for the
counter to not be scheduled in any more, and the high-level enable
operation arranges for it to be scheduled in again.
One way to solve this would be to have sched_in/out operations in the
hw_perf_counter_ops struct as well as enable/disable. However, this
takes a simpler approach: it adds a 'prev_state' field to the
perf_counter struct that allows a counter's enable method to know
whether the counter was previously disabled or just inactive
(scheduled out), and therefore whether the enable method is being
called as a result of a high-level enable or a schedule-in operation.
This then allows the context switch, migration and page fault counters
to reset their hw.prev_count value in their enable functions only if
they are called as a result of a high-level enable operation.
Although page faults would normally only occur while the counter is
scheduled in, this changes the page fault counter code too in case
there are ever circumstances where page faults get counted against a
task while its counters are not scheduled in.
Reported-by: Jaswinder Singh Rajput <jaswinder@kernel.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
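
To make the prev_state idea above concrete, here is a small standalone model (not kernel code; all names are invented) of a software counter enable hook that resets its baseline only on a real enable, not on a sched-in:

/*
 * Standalone model of the prev_state mechanism, with invented names.
 * The same enable hook runs both for a high-level enable and for a
 * sched-in; prev_state tells the two apart.
 */
enum model_state { MODEL_OFF = -1, MODEL_INACTIVE = 0, MODEL_ACTIVE = 1 };

struct model_counter {
	enum model_state state, prev_state;
	unsigned long long prev_count;	/* baseline for the current enable */
};

static void model_counter_enable(struct model_counter *c, unsigned long long raw)
{
	/*
	 * A real enable (previously OFF) takes a new baseline; a plain
	 * sched-in (previously INACTIVE) keeps the old one, so events
	 * counted while the task was scheduled out are not lost.
	 */
	if (c->prev_state <= MODEL_OFF)
		c->prev_count = raw;
	c->state = MODEL_ACTIVE;
}
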
2009-02-13 11:10:34 +00:00
|
|
|
counter->prev_state = PERF_COUNTER_STATE_OFF;
|
2008-12-04 19:12:29 +00:00
|
|
|
|
perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features
This primarily adds a way for perf_counter users to enable and disable
counters and groups. Enabling or disabling a counter or group also
enables or disables all of the child counters that have been cloned
from it to monitor children of the task monitored by the top-level
counter. The userspace interface to enable/disable counters is via
ioctl on the counter file descriptor.
Along the way this extends the code that handles child counters to
handle child counter groups properly. A group with multiple counters
will be cloned to child tasks if and only if the group leader has the
hw_event.inherit bit set - if it is set the whole group is cloned as a
group in the child task.
In order to be able to enable or disable all child counters of a given
top-level counter, we need a way to find them all. Hence I have added
a child_list field to struct perf_counter, which is the head of the
list of children for a top-level counter, or the link in that list for
a child counter. That list is protected by the perf_counter.mutex
field.
This also adds a mutex to the perf_counter_context struct. Previously
the list of counters was protected just by the lock field in the
context, which meant that perf_counter_init_task had to take that lock
and then take whatever lock/mutex protects the top-level counter's
child_list. But the counter enable/disable functions need to take
that lock in order to traverse the list, then for each counter take
the lock in that counter's context in order to change the counter's
state safely, which will lead to a deadlock.
To solve this, we now have both a mutex and a spinlock in the context,
and taking either is sufficient to ensure the list of counters can't
change - you have to take both before changing the list. Now
perf_counter_init_task takes the mutex instead of the lock (which
incidentally means that inherit_counter can use GFP_KERNEL instead of
GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new
enable/disable functions can take the mutex while traversing the list
of child counters without incurring a possible deadlock when the
counter manipulation code locks the context for a child counter.
We also had an misfeature that the first counter added to a context
would possibly not go on until the next sched-in, because we were
using ctx->nr_active to detect if the context was running on a CPU.
But nr_active is the number of active counters, and if that was zero
(because the context didn't have any counters yet) it would look like
the context wasn't running on a cpu and so the retry code in
__perf_install_in_context wouldn't retry. So this adds an 'is_active'
field that is set when the context is on a CPU, even if it has no
counters. The is_active field is only used for task contexts, not for
per-cpu contexts.
If we enable a subsidiary counter in a group that is active on a CPU,
and the arch code can't enable the counter, then we have to pull the
whole group off the CPU. We do this with group_sched_out, which gets
moved up in the file so it comes before all its callers. This also
adds similar logic to __perf_install_in_context so that the "all on,
or none" invariant of groups is preserved when adding a new counter to
a group.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-17 07:10:22 +00:00
|
|
|
/*
|
|
|
|
* Don't put the counter on if it is disabled or if
|
|
|
|
* it is in a group and the group isn't on.
|
|
|
|
*/
|
|
|
|
if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
|
|
|
|
(leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
|
|
|
|
goto unlock;
|
|
|
|
|
perf_counter: Add support for pinned and exclusive counter groups
Impact: New perf_counter features
A pinned counter group is one that the user wants to have on the CPU
whenever possible, i.e. whenever the associated task is running, for
a per-task group, or always for a per-cpu group. If the system
cannot satisfy that, it puts the group into an error state where
it is not scheduled any more and reads from it return EOF (i.e. 0
bytes read). The group can be released from error state and made
readable again using prctl(PR_TASK_PERF_COUNTERS_ENABLE). When we
have finer-grained enable/disable controls on counters we'll be able
to reset the error state on individual groups.
An exclusive group is one that the user wants to be the only group
using the CPU performance monitor hardware whenever it is on. The
counter group scheduler will not schedule an exclusive group if there
are already other groups on the CPU and will not schedule other groups
onto the CPU if there is an exclusive group scheduled (that statement
does not apply to groups containing only software counters, which can
always go on and which do not prevent an exclusive group from going on).
With an exclusive group, we will be able to let users program PMU
registers at a low level without the concern that those settings will
perturb other measurements.
Along the way this reorganizes things a little:
- is_software_counter() is moved to perf_counter.h.
- cpuctx->active_oncpu now records the number of hardware counters on
the CPU, i.e. it now excludes software counters. Nothing was reading
cpuctx->active_oncpu before, so this change is harmless.
- A new cpuctx->exclusive field records whether we currently have an
exclusive group on the CPU.
- counter_sched_out moves higher up in perf_counter.c and gets called
from __perf_counter_remove_from_context and __perf_counter_exit_task,
where we used to have essentially the same code.
- __perf_counter_sched_in now goes through the counter list twice, doing
the pinned counters in the first loop and the non-pinned counters in
the second loop, in order to give the pinned counters the best chance
to be scheduled in.
Note that only a group leader can be exclusive or pinned, and that
attribute applies to the whole group. This avoids some awkwardness in
some corner cases (e.g. where a group leader is closed and the other
group members get added to the context list). If we want to relax that
restriction later, we can, and it is easier to relax a restriction than
to apply a new one.
This doesn't yet handle the case where a pinned counter is inherited
and goes into error state in the child - the error state is not
propagated up to the parent when the child exits, and arguably it
should.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-14 10:00:30 +00:00
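As a reading aid, here is a simplified sketch of the scheduling rule the commit text above describes; the real check is the group_can_go_on() call just below. It assumes the declarations in this file and linux/perf_counter.h; the hw_event.exclusive field name is an assumption, while is_software_counter(), cpuctx->active_oncpu and cpuctx->exclusive are named in the text.

/*
 * Illustrative only: the rule stated above, in code form.  A group of
 * software counters can always go on; an exclusive group needs an
 * otherwise idle PMU; nothing else can go on while an exclusive group
 * is on.  'can_add_hw' says whether the caller still has room for
 * another hardware group.
 */
static int group_rule_sketch(struct perf_counter *counter,
			     struct perf_cpu_context *cpuctx,
			     int can_add_hw)
{
	if (is_software_counter(counter))
		return 1;
	if (counter->hw_event.exclusive && cpuctx->active_oncpu)
		return 0;
	if (cpuctx->exclusive)
		return 0;
	return can_add_hw;
}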
	/*
	 * An exclusive counter can't go on if there are already active
	 * hardware counters, and no hardware counter can go on if there
	 * is already an exclusive counter on.
	 */
	if (!group_can_go_on(counter, cpuctx, 1))
		err = -EEXIST;
	else
		err = counter_sched_in(counter, cpuctx, ctx, cpu);

	if (err) {
		/*
		 * This counter couldn't go on.  If it is in a group
		 * then we have to pull the whole group off.
		 * If the counter group is pinned then put it in error state.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->hw_event.pinned)
			leader->state = PERF_COUNTER_STATE_ERROR;
	}

	if (!err && !ctx->task && cpuctx->max_pertask)
		cpuctx->max_pertask--;

unlock:
	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);
	curr_rq_unlock_irq_restore(&flags);
}

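The pinned/error-state handling above is visible to userspace as a zero-length read. A hedged sketch of how a monitoring program might react, assuming PR_TASK_PERF_COUNTERS_ENABLE is available through this tree's prctl headers; the constant name comes from the commit text above, everything else here is illustrative.

#include <unistd.h>
#include <sys/prctl.h>

/*
 * Read a pinned counter group leader.  Per the commit text, a read of
 * 0 bytes means the group was put into error state because it could
 * not be kept on the PMU; re-enable the task's counters and treat the
 * sample as invalid.  PR_TASK_PERF_COUNTERS_ENABLE is assumed to be
 * defined by this kernel's prctl.h.
 */
static int read_pinned_counter(int fd, unsigned long long *value)
{
	ssize_t n = read(fd, value, sizeof(*value));

	if (n == 0) {			/* group is in error state */
		prctl(PR_TASK_PERF_COUNTERS_ENABLE, 0, 0, 0, 0);
		return -1;
	}
	return n == sizeof(*value) ? 0 : -1;
}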
/*
 * Attach a performance counter to a context
 *
 * First we add the counter to the list with the hardware enable bit
 * in counter->hw_config cleared.
 *
 * If the counter is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_counter_context *ctx,
			struct perf_counter *counter,
			int cpu)
{
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 counter, 1);
		return;
	}

	counter->task = task;
retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents that this context is scheduled in so we
	 * can add the counter safely, if the call above did not
	 * succeed.
	 */
	if (list_empty(&counter->list_entry)) {
		list_add_counter(counter, ctx);
		ctx->nr_counters++;
	}
	spin_unlock_irq(&ctx->lock);
}

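A minimal sketch of the calling convention documented above ("Must be called with ctx->mutex held"); the surrounding attach helper is illustrative, not the actual syscall path, and relies only on declarations already present in this file.

/*
 * Illustrative caller: attach an already-allocated counter to a
 * context.  The context mutex serializes against the inherit and
 * enable/disable paths that also walk the counter list.
 */
static void attach_counter_sketch(struct perf_counter_context *ctx,
				  struct perf_counter *counter, int cpu)
{
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, counter, cpu);
	mutex_unlock(&ctx->mutex);
}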
/*
 * Cross CPU call to enable a performance counter
 */
static void __perf_counter_enable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	unsigned long flags;
	int err;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	curr_rq_lock_irq_save(&flags);
	spin_lock(&ctx->lock);

perfcounters: make context switch and migration software counters work again
Jaswinder Singh Rajput reported that commit 23a185ca8abbeef caused the
context switch and migration software counters to report zero always.
With that commit, the software counters only count events that occur
between sched-in and sched-out for a task. This is necessary for the
counter enable/disable prctls and ioctls to work. However, the
context switch and migration counts are incremented after sched-out
for one task and before sched-in for the next. Since the increment
doesn't occur while a task is scheduled in (as far as the software
counters are concerned) it doesn't count towards any counter.
Thus the context switch and migration counters need to count events
that occur at any time, provided the counter is enabled, not just
those that occur while the task is scheduled in (from the perf_counter
subsystem's point of view). The problem though is that the software
counter code can't tell the difference between being enabled and being
scheduled in, and between being disabled and being scheduled out,
since we use the one pair of enable/disable entry points for both.
That is, the high-level disable operation simply arranges for the
counter to not be scheduled in any more, and the high-level enable
operation arranges for it to be scheduled in again.
One way to solve this would be to have sched_in/out operations in the
hw_perf_counter_ops struct as well as enable/disable. However, this
takes a simpler approach: it adds a 'prev_state' field to the
perf_counter struct that allows a counter's enable method to know
whether the counter was previously disabled or just inactive
(scheduled out), and therefore whether the enable method is being
called as a result of a high-level enable or a schedule-in operation.
This then allows the context switch, migration and page fault counters
to reset their hw.prev_count value in their enable functions only if
they are called as a result of a high-level enable operation.
Although page faults would normally only occur while the counter is
scheduled in, this changes the page fault counter code too in case
there are ever circumstances where page faults get counted against a
task while its counters are not scheduled in.
Reported-by: Jaswinder Singh Rajput <jaswinder@kernel.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-02-13 11:10:34 +00:00
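To make the prev_state mechanism described above concrete, here is a sketch of how a software counter's enable hook could use it; prev_state, hw.prev_count and PERF_COUNTER_STATE_OFF are named by or implied in the text above, while the function names, the atomic64 type of hw.prev_count and the event-source stub are assumptions for illustration.

/*
 * Illustrative software-counter enable hook: only reset the reference
 * count when this is a real (high-level) enable, i.e. when the counter
 * was previously OFF, not when it is merely being scheduled back in.
 */
static u64 get_current_count(struct perf_counter *counter)
{
	/* Stand-in for the event source (ctx switches, migrations, ...). */
	return 0;
}

static int sw_counter_enable_sketch(struct perf_counter *counter)
{
	if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
		atomic64_set(&counter->hw.prev_count,
			     get_current_count(counter));
	return 0;
}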
	counter->prev_state = counter->state;
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto unlock;
	counter->state = PERF_COUNTER_STATE_INACTIVE;

	/*
	 * If the counter is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
		goto unlock;

perf_counter: Add support for pinned and exclusive counter groups
Impact: New perf_counter features
A pinned counter group is one that the user wants to have on the CPU
whenever possible, i.e. whenever the associated task is running, for
a per-task group, or always for a per-cpu group. If the system
cannot satisfy that, it puts the group into an error state where
it is not scheduled any more and reads from it return EOF (i.e. 0
bytes read). The group can be released from error state and made
readable again using prctl(PR_TASK_PERF_COUNTERS_ENABLE). When we
have finer-grained enable/disable controls on counters we'll be able
to reset the error state on individual groups.
An exclusive group is one that the user wants to be the only group
using the CPU performance monitor hardware whenever it is on. The
counter group scheduler will not schedule an exclusive group if there
are already other groups on the CPU and will not schedule other groups
onto the CPU if there is an exclusive group scheduled (that statement
does not apply to groups containing only software counters, which can
always go on and which do not prevent an exclusive group from going on).
With an exclusive group, we will be able to let users program PMU
registers at a low level without the concern that those settings will
perturb other measurements.
Along the way this reorganizes things a little:
- is_software_counter() is moved to perf_counter.h.
- cpuctx->active_oncpu now records the number of hardware counters on
the CPU, i.e. it now excludes software counters. Nothing was reading
cpuctx->active_oncpu before, so this change is harmless.
- A new cpuctx->exclusive field records whether we currently have an
exclusive group on the CPU.
- counter_sched_out moves higher up in perf_counter.c and gets called
from __perf_counter_remove_from_context and __perf_counter_exit_task,
where we used to have essentially the same code.
- __perf_counter_sched_in now goes through the counter list twice, doing
the pinned counters in the first loop and the non-pinned counters in
the second loop, in order to give the pinned counters the best chance
to be scheduled in.
Note that only a group leader can be exclusive or pinned, and that
attribute applies to the whole group. This avoids some awkwardness in
some corner cases (e.g. where a group leader is closed and the other
group members get added to the context list). If we want to relax that
restriction later, we can, and it is easier to relax a restriction than
to apply a new one.
This doesn't yet handle the case where a pinned counter is inherited
and goes into error state in the child - the error state is not
propagated up to the parent when the child exits, and arguably it
should.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-14 10:00:30 +00:00
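To make the exclusive-group scheduling rule above concrete, here is a minimal, standalone C sketch of a group_can_go_on-style check. The struct layouts and names (group, cpu_state, software_only, group_can_go_on_sketch) are illustrative stand-ins for the fields described in the commit message, not the kernel's actual definitions.

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-ins for the fields described in the commit message. */
struct group {
        bool software_only;     /* group contains only software counters      */
        bool exclusive;         /* user asked for exclusive use of the PMU    */
};

struct cpu_state {
        int  active_oncpu;      /* number of hardware counters currently on   */
        bool exclusive;         /* an exclusive group is currently scheduled  */
};

/* Can 'grp' be scheduled onto the CPU described by 'cpu'? */
static bool group_can_go_on_sketch(const struct group *grp,
                                   const struct cpu_state *cpu)
{
        if (grp->software_only)
                return true;    /* software-only groups always go on          */
        if (cpu->exclusive)
                return false;   /* nothing goes on beside an exclusive group  */
        if (grp->exclusive && cpu->active_oncpu > 0)
                return false;   /* exclusive groups need the PMU to themselves */
        return true;
}

int main(void)
{
        struct cpu_state cpu = { .active_oncpu = 2, .exclusive = false };
        struct group excl = { .software_only = false, .exclusive = true };

        printf("exclusive group can go on: %s\n",
               group_can_go_on_sketch(&excl, &cpu) ? "yes" : "no");  /* "no" */
        return 0;
}

Pinned groups are not special-cased in this check; per the description above, a pinned group that fails to be scheduled is simply moved to the error state by the caller.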
|
|
|
|
perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features
This primarily adds a way for perf_counter users to enable and disable
counters and groups. Enabling or disabling a counter or group also
enables or disables all of the child counters that have been cloned
from it to monitor children of the task monitored by the top-level
counter. The userspace interface to enable/disable counters is via
ioctl on the counter file descriptor.
Along the way this extends the code that handles child counters to
handle child counter groups properly. A group with multiple counters
will be cloned to child tasks if and only if the group leader has the
hw_event.inherit bit set - if it is set the whole group is cloned as a
group in the child task.
In order to be able to enable or disable all child counters of a given
top-level counter, we need a way to find them all. Hence I have added
a child_list field to struct perf_counter, which is the head of the
list of children for a top-level counter, or the link in that list for
a child counter. That list is protected by the perf_counter.mutex
field.
This also adds a mutex to the perf_counter_context struct. Previously
the list of counters was protected just by the lock field in the
context, which meant that perf_counter_init_task had to take that lock
and then take whatever lock/mutex protects the top-level counter's
child_list. But the counter enable/disable functions need to take
that lock in order to traverse the list, then for each counter take
the lock in that counter's context in order to change the counter's
state safely, which will lead to a deadlock.
To solve this, we now have both a mutex and a spinlock in the context,
and taking either is sufficient to ensure the list of counters can't
change - you have to take both before changing the list. Now
perf_counter_init_task takes the mutex instead of the lock (which
incidentally means that inherit_counter can use GFP_KERNEL instead of
GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new
enable/disable functions can take the mutex while traversing the list
of child counters without incurring a possible deadlock when the
counter manipulation code locks the context for a child counter.
We also had a misfeature that the first counter added to a context
would possibly not go on until the next sched-in, because we were
using ctx->nr_active to detect if the context was running on a CPU.
But nr_active is the number of active counters, and if that was zero
(because the context didn't have any counters yet) it would look like
the context wasn't running on a cpu and so the retry code in
__perf_install_in_context wouldn't retry. So this adds an 'is_active'
field that is set when the context is on a CPU, even if it has no
counters. The is_active field is only used for task contexts, not for
per-cpu contexts.
If we enable a subsidiary counter in a group that is active on a CPU,
and the arch code can't enable the counter, then we have to pull the
whole group off the CPU. We do this with group_sched_out, which gets
moved up in the file so it comes before all its callers. This also
adds similar logic to __perf_install_in_context so that the "all on,
or none" invariant of groups is preserved when adding a new counter to
a group.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-17 07:10:22 +00:00
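The mutex-plus-spinlock rule described above (holding either lock keeps the counter list stable, while changing the list requires both) can be illustrated with a small userspace sketch; pthread primitives stand in for the kernel's mutex and spinlock, all names here are hypothetical, and the program compiles with -pthread.

#include <pthread.h>
#include <stdio.h>

/*
 * A context holds both a mutex and a spinlock.  Readers take either one;
 * a writer must take both (mutex first) before changing the list.
 */
struct ctx {
        pthread_mutex_t    mutex;       /* may be held while sleeping/allocating */
        pthread_spinlock_t lock;        /* held in non-sleeping paths            */
        int                nr_counters;
};

/* Reader path A: e.g. enable/disable walking the list of children. */
static int read_with_mutex(struct ctx *c)
{
        pthread_mutex_lock(&c->mutex);
        int n = c->nr_counters;         /* list cannot change underneath us */
        pthread_mutex_unlock(&c->mutex);
        return n;
}

/* Reader path B: e.g. the scheduler, which must not sleep. */
static int read_with_spinlock(struct ctx *c)
{
        pthread_spin_lock(&c->lock);
        int n = c->nr_counters;
        pthread_spin_unlock(&c->lock);
        return n;
}

/* Writer: takes both locks, mutex first, before changing the list. */
static void add_counter(struct ctx *c)
{
        pthread_mutex_lock(&c->mutex);  /* may block, so take it first */
        pthread_spin_lock(&c->lock);
        c->nr_counters++;               /* "change the list"           */
        pthread_spin_unlock(&c->lock);
        pthread_mutex_unlock(&c->mutex);
}

int main(void)
{
        struct ctx c = { .nr_counters = 0 };
        pthread_mutex_init(&c.mutex, NULL);
        pthread_spin_init(&c.lock, PTHREAD_PROCESS_PRIVATE);

        add_counter(&c);
        printf("%d %d\n", read_with_mutex(&c), read_with_spinlock(&c)); /* 1 1 */
        return 0;
}

Taking the sleeping lock before the spinning lock in the writer mirrors the ordering constraint the commit message relies on: code that may sleep (such as allocation with GFP_KERNEL) holds only the mutex, never the spinlock.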
|
|
|
if (!group_can_go_on(counter, cpuctx, 1))
|
|
|
|
err = -EEXIST;
|
|
|
|
else
|
|
|
|
err = counter_sched_in(counter, cpuctx, ctx,
|
|
|
|
smp_processor_id());
|
|
|
|
|
|
|
|
if (err) {
|
|
|
|
/*
|
|
|
|
* If this counter can't go on and it's part of a
|
|
|
|
* group, then the whole group has to come off.
|
|
|
|
*/
|
|
|
|
if (leader != counter)
|
|
|
|
group_sched_out(leader, cpuctx, ctx);
|
|
|
|
if (leader->hw_event.pinned)
|
|
|
|
leader->state = PERF_COUNTER_STATE_ERROR;
|
|
|
|
}
|
|
|
|
|
|
|
|
unlock:
|
|
|
|
spin_unlock(&ctx->lock);
|
|
|
|
curr_rq_unlock_irq_restore(&flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Enable a counter.
|
|
|
|
*/
|
|
|
|
static void perf_counter_enable(struct perf_counter *counter)
|
|
|
|
{
|
|
|
|
struct perf_counter_context *ctx = counter->ctx;
|
|
|
|
struct task_struct *task = ctx->task;
|
|
|
|
|
|
|
|
if (!task) {
|
|
|
|
/*
|
|
|
|
* Enable the counter on the cpu that it's on
|
|
|
|
*/
|
|
|
|
smp_call_function_single(counter->cpu, __perf_counter_enable,
|
|
|
|
counter, 1);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_lock_irq(&ctx->lock);
|
|
|
|
if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the counter is in error state, clear that first.
|
|
|
|
* That way, if we see the counter in error state below, we
|
|
|
|
* know that it has gone back into error state, as distinct
|
|
|
|
* from the task having been scheduled away before the
|
|
|
|
* cross-call arrived.
|
|
|
|
*/
|
|
|
|
if (counter->state == PERF_COUNTER_STATE_ERROR)
|
|
|
|
counter->state = PERF_COUNTER_STATE_OFF;
|
|
|
|
|
|
|
|
retry:
|
|
|
|
spin_unlock_irq(&ctx->lock);
|
|
|
|
task_oncpu_function_call(task, __perf_counter_enable, counter);
|
|
|
|
|
|
|
|
spin_lock_irq(&ctx->lock);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the context is active and the counter is still off,
|
|
|
|
* we need to retry the cross-call.
|
|
|
|
*/
|
|
|
|
if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
|
|
|
|
goto retry;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Since we have the lock this context can't be scheduled
|
|
|
|
* in, so we can change the state safely.
|
|
|
|
*/
|
|
|
|
if (counter->state == PERF_COUNTER_STATE_OFF)
|
|
|
|
counter->state = PERF_COUNTER_STATE_INACTIVE;
|
|
|
|
out:
|
|
|
|
spin_unlock_irq(&ctx->lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Enable a counter and all its children.
|
|
|
|
*/
|
|
|
|
static void perf_counter_enable_family(struct perf_counter *counter)
|
|
|
|
{
|
|
|
|
struct perf_counter *child;
|
|
|
|
|
|
|
|
perf_counter_enable(counter);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Lock the mutex to protect the list of children
|
|
|
|
*/
|
|
|
|
mutex_lock(&counter->mutex);
|
|
|
|
list_for_each_entry(child, &counter->child_list, child_list)
|
|
|
|
perf_counter_enable(child);
|
|
|
|
mutex_unlock(&counter->mutex);
|
2008-12-11 07:38:42 +00:00
|
|
|
}
|
|
|
|
|
2008-12-21 13:43:25 +00:00
|
|
|
void __perf_counter_sched_out(struct perf_counter_context *ctx,
|
|
|
|
struct perf_cpu_context *cpuctx)
|
|
|
|
{
|
|
|
|
struct perf_counter *counter;
|
2009-01-09 05:43:42 +00:00
|
|
|
u64 flags;
|
2008-12-21 13:43:25 +00:00
|
|
|
|
2009-01-17 07:10:22 +00:00
|
|
|
spin_lock(&ctx->lock);
|
|
|
|
ctx->is_active = 0;
|
2008-12-21 13:43:25 +00:00
|
|
|
if (likely(!ctx->nr_counters))
|
2009-01-17 07:10:22 +00:00
|
|
|
goto out;
|
2008-12-21 13:43:25 +00:00
|
|
|
|
2009-01-09 05:43:42 +00:00
|
|
|
flags = hw_perf_save_disable();
|
2008-12-21 13:43:25 +00:00
|
|
|
if (ctx->nr_active) {
|
|
|
|
list_for_each_entry(counter, &ctx->counter_list, list_entry)
|
|
|
|
group_sched_out(counter, cpuctx, ctx);
|
|
|
|
}
|
2009-01-09 05:43:42 +00:00
|
|
|
hw_perf_restore(flags);
|
2009-01-17 07:10:22 +00:00
|
|
|
out:
|
2008-12-21 13:43:25 +00:00
|
|
|
spin_unlock(&ctx->lock);
|
|
|
|
}
|
|
|
|
|
2008-12-04 19:12:29 +00:00
|
|
|
/*
|
|
|
|
* Called from scheduler to remove the counters of the current task,
|
|
|
|
* with interrupts disabled.
|
|
|
|
*
|
|
|
|
* We stop each counter and update the counter value in counter->count.
|
|
|
|
*
|
2008-12-17 13:20:28 +00:00
|
|
|
* This does not protect us against NMI, but disable()
|
2008-12-04 19:12:29 +00:00
|
|
|
* sets the disabled bit in the control field of counter _before_
|
|
|
|
* accessing the counter control register. If an NMI hits, then it will
|
|
|
|
* not restart the counter.
|
|
|
|
*/
|
|
|
|
void perf_counter_task_sched_out(struct task_struct *task, int cpu)
|
|
|
|
{
|
|
|
|
struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
|
|
|
|
struct perf_counter_context *ctx = &task->perf_counter_ctx;
|
|
|
|
|
|
|
|
if (likely(!cpuctx->task_ctx))
|
|
|
|
return;
|
|
|
|
|
2008-12-21 13:43:25 +00:00
|
|
|
__perf_counter_sched_out(ctx, cpuctx);
|
|
|
|
|
2008-12-04 19:12:29 +00:00
|
|
|
cpuctx->task_ctx = NULL;
|
|
|
|
}
|
|
|
|
|
2008-12-21 13:43:25 +00:00
|
|
|
static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
|
2008-12-11 07:38:42 +00:00
|
|
|
{
|
2008-12-21 13:43:25 +00:00
|
|
|
__perf_counter_sched_out(&cpuctx->ctx, cpuctx);
|
2008-12-11 07:38:42 +00:00
|
|
|
}
|
|
|
|
|
2008-12-17 07:54:56 +00:00
|
|
|
static int
|
2008-12-11 07:38:42 +00:00
|
|
|
group_sched_in(struct perf_counter *group_counter,
|
|
|
|
struct perf_cpu_context *cpuctx,
|
|
|
|
struct perf_counter_context *ctx,
|
|
|
|
int cpu)
|
|
|
|
{
|
2008-12-21 12:50:42 +00:00
|
|
|
struct perf_counter *counter, *partial_group;
|
2009-01-09 05:43:42 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (group_counter->state == PERF_COUNTER_STATE_OFF)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
|
|
|
|
if (ret)
|
|
|
|
return ret < 0 ? ret : 0;
|
2008-12-11 07:38:42 +00:00
|
|
|
|
perfcounters: make context switch and migration software counters work again
Jaswinder Singh Rajput reported that commit 23a185ca8abbeef caused the
context switch and migration software counters to always report zero.
With that commit, the software counters only count events that occur
between sched-in and sched-out for a task. This is necessary for the
counter enable/disable prctls and ioctls to work. However, the
context switch and migration counts are incremented after sched-out
for one task and before sched-in for the next. Since the increment
doesn't occur while a task is scheduled in (as far as the software
counters are concerned) it doesn't count towards any counter.
Thus the context switch and migration counters need to count events
that occur at any time, provided the counter is enabled, not just
those that occur while the task is scheduled in (from the perf_counter
subsystem's point of view). The problem though is that the software
counter code can't tell the difference between being enabled and being
scheduled in, and between being disabled and being scheduled out,
since we use the one pair of enable/disable entry points for both.
That is, the high-level disable operation simply arranges for the
counter to not be scheduled in any more, and the high-level enable
operation arranges for it to be scheduled in again.
One way to solve this would be to have sched_in/out operations in the
hw_perf_counter_ops struct as well as enable/disable. However, this
takes a simpler approach: it adds a 'prev_state' field to the
perf_counter struct that allows a counter's enable method to know
whether the counter was previously disabled or just inactive
(scheduled out), and therefore whether the enable method is being
called as a result of a high-level enable or a schedule-in operation.
This then allows the context switch, migration and page fault counters
to reset their hw.prev_count value in their enable functions only if
they are called as a result of a high-level enable operation.
Although page faults would normally only occur while the counter is
scheduled in, this changes the page fault counter code too in case
there are ever circumstances where page faults get counted against a
task while its counters are not scheduled in.
Reported-by: Jaswinder Singh Rajput <jaswinder@kernel.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-02-13 11:10:34 +00:00
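As a rough illustration of the prev_state idea, the standalone sketch below shows an enable callback that resets its baseline only when the previous state was "off" (a high-level enable) and keeps it across an ordinary schedule-in. The state names and fields are simplified stand-ins, not the kernel's actual types.

#include <stdio.h>

/* Illustrative stand-ins for the states described above. */
enum state { STATE_OFF, STATE_INACTIVE, STATE_ACTIVE };

struct sw_counter {
        enum state state;
        enum state prev_state;  /* state before this enable call              */
        long long  prev_count;  /* baseline the reported count starts from    */
        long long  events;      /* running total of the underlying event      */
};

/*
 * Enable callback: called both for a high-level enable (prev_state == OFF)
 * and for an ordinary schedule-in (prev_state == INACTIVE).  Only the former
 * resets the baseline, so events that occurred while the task was scheduled
 * out still count, as the commit message requires.
 */
static void sw_counter_enable(struct sw_counter *c)
{
        if (c->prev_state == STATE_OFF)
                c->prev_count = c->events;      /* fresh enable: new baseline */
        c->state = STATE_ACTIVE;                /* schedule-in: keep baseline */
}

static long long sw_counter_read(const struct sw_counter *c)
{
        return c->events - c->prev_count;
}

int main(void)
{
        struct sw_counter c = { .state = STATE_OFF, .prev_state = STATE_OFF };

        c.prev_state = c.state;         /* high-level enable                  */
        sw_counter_enable(&c);
        c.events += 3;                  /* e.g. three context switches        */

        c.state = STATE_INACTIVE;       /* scheduled out...                   */
        c.events += 2;                  /* ...two more events happen meanwhile */

        c.prev_state = c.state;         /* scheduled back in                  */
        sw_counter_enable(&c);

        printf("count = %lld\n", sw_counter_read(&c));  /* prints 5 */
        return 0;
}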
|
|
|
group_counter->prev_state = group_counter->state;
|
2008-12-21 12:50:42 +00:00
|
|
|
if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
|
|
|
|
return -EAGAIN;
|
2008-12-11 07:38:42 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Schedule in siblings as one group (if any):
|
|
|
|
*/
|
2008-12-17 07:54:56 +00:00
|
|
|
list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
|
2009-02-13 11:10:34 +00:00
|
|
|
counter->prev_state = counter->state;
|
2008-12-21 12:50:42 +00:00
|
|
|
if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
|
|
|
|
partial_group = counter;
|
|
|
|
goto group_error;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-01-09 05:43:42 +00:00
|
|
|
return 0;
|
2008-12-21 12:50:42 +00:00
|
|
|
|
|
|
|
group_error:
|
|
|
|
/*
|
|
|
|
* Groups can be scheduled in as one unit only, so undo any
|
|
|
|
* partial group before returning:
|
|
|
|
*/
|
|
|
|
list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
|
|
|
|
if (counter == partial_group)
|
|
|
|
break;
|
|
|
|
counter_sched_out(counter, cpuctx, ctx);
|
2008-12-17 07:54:56 +00:00
|
|
|
}
|
2008-12-21 12:50:42 +00:00
|
|
|
counter_sched_out(group_counter, cpuctx, ctx);
|
2008-12-17 07:54:56 +00:00
|
|
|
|
2008-12-21 12:50:42 +00:00
|
|
|
return -EAGAIN;
|
2008-12-11 07:38:42 +00:00
|
|
|
}
|
|
|
|
|
2008-12-21 13:43:25 +00:00
|
|
|
static void
|
|
|
|
__perf_counter_sched_in(struct perf_counter_context *ctx,
|
|
|
|
struct perf_cpu_context *cpuctx, int cpu)
|
2008-12-04 19:12:29 +00:00
|
|
|
{
|
|
|
|
struct perf_counter *counter;
|
2009-01-09 05:43:42 +00:00
|
|
|
u64 flags;
|
2009-01-12 04:11:00 +00:00
|
|
|
int can_add_hw = 1;
|
2008-12-04 19:12:29 +00:00
|
|
|
|
2009-01-17 07:10:22 +00:00
|
|
|
spin_lock(&ctx->lock);
|
|
|
|
ctx->is_active = 1;
|
2008-12-04 19:12:29 +00:00
|
|
|
if (likely(!ctx->nr_counters))
|
2009-01-17 07:10:22 +00:00
|
|
|
goto out;
|
2008-12-04 19:12:29 +00:00
|
|
|
|
2009-01-09 05:43:42 +00:00
|
|
|
flags = hw_perf_save_disable();
|
2009-01-14 10:00:30 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* First go through the list and put on any pinned groups
|
|
|
|
* in order to give them the best chance of going on.
|
|
|
|
*/
|
|
|
|
list_for_each_entry(counter, &ctx->counter_list, list_entry) {
|
|
|
|
if (counter->state <= PERF_COUNTER_STATE_OFF ||
|
|
|
|
!counter->hw_event.pinned)
|
|
|
|
continue;
|
|
|
|
if (counter->cpu != -1 && counter->cpu != cpu)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (group_can_go_on(counter, cpuctx, 1))
|
|
|
|
group_sched_in(counter, cpuctx, ctx, cpu);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If this pinned group hasn't been scheduled,
|
|
|
|
* put it in error state.
|
|
|
|
*/
|
|
|
|
if (counter->state == PERF_COUNTER_STATE_INACTIVE)
|
|
|
|
counter->state = PERF_COUNTER_STATE_ERROR;
|
|
|
|
}
|
|
|
|
|
2008-12-11 07:38:42 +00:00
|
|
|
list_for_each_entry(counter, &ctx->counter_list, list_entry) {
|
2009-01-14 10:00:30 +00:00
|
|
|
/*
|
|
|
|
* Ignore counters in OFF or ERROR state, and
|
|
|
|
* ignore pinned counters since we did them already.
|
|
|
|
*/
|
|
|
|
if (counter->state <= PERF_COUNTER_STATE_OFF ||
|
|
|
|
counter->hw_event.pinned)
|
|
|
|
continue;
|
|
|
|
|
2008-12-11 07:38:42 +00:00
|
|
|
/*
|
|
|
|
* Listen to the 'cpu' scheduling filter constraint
|
|
|
|
* of counters:
|
|
|
|
*/
|
2008-12-04 19:12:29 +00:00
|
|
|
if (counter->cpu != -1 && counter->cpu != cpu)
|
|
|
|
continue;
|
|
|
|
|
2009-01-14 10:00:30 +00:00
|
|
|
if (group_can_go_on(counter, cpuctx, can_add_hw)) {
|
2009-01-12 04:11:00 +00:00
|
|
|
if (group_sched_in(counter, cpuctx, ctx, cpu))
|
|
|
|
can_add_hw = 0;
|
2009-01-14 10:00:30 +00:00
|
|
|
}
|
2008-12-04 19:12:29 +00:00
|
|
|
}
|
2009-01-09 05:43:42 +00:00
|
|
|
hw_perf_restore(flags);
|
2009-01-17 07:10:22 +00:00
|
|
|
out:
|
2008-12-04 19:12:29 +00:00
|
|
|
spin_unlock(&ctx->lock);
|
2008-12-21 13:43:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called from scheduler to add the counters of the current task
|
|
|
|
* with interrupts disabled.
|
|
|
|
*
|
|
|
|
* We restore the counter value and then enable it.
|
|
|
|
*
|
|
|
|
* This does not protect us against NMI, but enable()
|
|
|
|
* sets the enabled bit in the control field of counter _before_
|
|
|
|
* accessing the counter control register. If an NMI hits, then it will
|
|
|
|
* keep the counter running.
|
|
|
|
*/
|
|
|
|
void perf_counter_task_sched_in(struct task_struct *task, int cpu)
|
|
|
|
{
|
|
|
|
struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
|
|
|
|
struct perf_counter_context *ctx = &task->perf_counter_ctx;
|
2008-12-11 07:38:42 +00:00
|
|
|
|
2008-12-21 13:43:25 +00:00
|
|
|
__perf_counter_sched_in(ctx, cpuctx, cpu);
|
2008-12-04 19:12:29 +00:00
|
|
|
cpuctx->task_ctx = ctx;
|
|
|
|
}
|
|
|
|
|
2008-12-21 13:43:25 +00:00
|
|
|
static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
|
|
|
|
{
|
|
|
|
struct perf_counter_context *ctx = &cpuctx->ctx;
|
|
|
|
|
|
|
|
__perf_counter_sched_in(ctx, cpuctx, cpu);
|
|
|
|
}
|
|
|
|
|
2008-12-11 13:59:31 +00:00
|
|
|
int perf_counter_task_disable(void)
|
|
|
|
{
|
|
|
|
struct task_struct *curr = current;
|
|
|
|
struct perf_counter_context *ctx = &curr->perf_counter_ctx;
|
|
|
|
struct perf_counter *counter;
|
2008-12-17 13:10:57 +00:00
|
|
|
unsigned long flags;
|
2008-12-11 13:59:31 +00:00
|
|
|
u64 perf_flags;
|
|
|
|
int cpu;
|
|
|
|
|
|
|
|
if (likely(!ctx->nr_counters))
|
|
|
|
return 0;
|
|
|
|
|
2008-12-17 13:10:57 +00:00
|
|
|
curr_rq_lock_irq_save(&flags);
|
2008-12-11 13:59:31 +00:00
|
|
|
cpu = smp_processor_id();
|
|
|
|
|
2008-12-17 13:10:57 +00:00
|
|
|
/* force the update of the task clock: */
|
|
|
|
__task_delta_exec(curr, 1);
|
|
|
|
|
2008-12-11 13:59:31 +00:00
|
|
|
perf_counter_task_sched_out(curr, cpu);
|
|
|
|
|
|
|
|
spin_lock(&ctx->lock);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Disable all the counters:
|
|
|
|
*/
|
|
|
|
perf_flags = hw_perf_save_disable();
|
|
|
|
|
2009-01-14 10:00:30 +00:00
|
|
|
list_for_each_entry(counter, &ctx->counter_list, list_entry) {
|
|
|
|
if (counter->state != PERF_COUNTER_STATE_ERROR)
|
|
|
|
counter->state = PERF_COUNTER_STATE_OFF;
|
|
|
|
}
|
2008-12-12 12:49:45 +00:00
|
|
|
|
2008-12-11 13:59:31 +00:00
|
|
|
hw_perf_restore(perf_flags);
|
|
|
|
|
|
|
|
spin_unlock(&ctx->lock);
|
|
|
|
|
2008-12-17 13:10:57 +00:00
|
|
|
curr_rq_unlock_irq_restore(&flags);
|
2008-12-11 13:59:31 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int perf_counter_task_enable(void)
|
|
|
|
{
|
|
|
|
struct task_struct *curr = current;
|
|
|
|
struct perf_counter_context *ctx = &curr->perf_counter_ctx;
|
|
|
|
struct perf_counter *counter;
|
2008-12-17 13:10:57 +00:00
|
|
|
unsigned long flags;
|
2008-12-11 13:59:31 +00:00
|
|
|
u64 perf_flags;
|
|
|
|
int cpu;
|
|
|
|
|
|
|
|
if (likely(!ctx->nr_counters))
|
|
|
|
return 0;
|
|
|
|
|
2008-12-17 13:10:57 +00:00
|
|
|
curr_rq_lock_irq_save(&flags);
|
2008-12-11 13:59:31 +00:00
|
|
|
cpu = smp_processor_id();
|
|
|
|
|
2008-12-17 13:10:57 +00:00
|
|
|
/* force the update of the task clock: */
|
|
|
|
__task_delta_exec(curr, 1);
|
|
|
|
|
2008-12-21 13:43:25 +00:00
|
|
|
perf_counter_task_sched_out(curr, cpu);
|
|
|
|
|
2008-12-11 13:59:31 +00:00
|
|
|
spin_lock(&ctx->lock);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Disable all the counters:
|
|
|
|
*/
|
|
|
|
perf_flags = hw_perf_save_disable();
|
|
|
|
|
|
|
|
list_for_each_entry(counter, &ctx->counter_list, list_entry) {
|
2009-01-14 10:00:30 +00:00
|
|
|
if (counter->state > PERF_COUNTER_STATE_OFF)
|
2008-12-11 13:59:31 +00:00
|
|
|
continue;
|
2008-12-11 14:17:03 +00:00
|
|
|
counter->state = PERF_COUNTER_STATE_INACTIVE;
|
2008-12-17 13:10:57 +00:00
|
|
|
counter->hw_event.disabled = 0;
|
2008-12-11 13:59:31 +00:00
|
|
|
}
|
|
|
|
hw_perf_restore(perf_flags);
|
|
|
|
|
|
|
|
spin_unlock(&ctx->lock);
|
|
|
|
|
|
|
|
perf_counter_task_sched_in(curr, cpu);
|
|
|
|
|
2008-12-17 13:10:57 +00:00
|
|
|
curr_rq_unlock_irq_restore(&flags);
|
2008-12-11 13:59:31 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
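perf_counter_task_disable() and perf_counter_task_enable() above back the PR_TASK_PERF_COUNTERS_* prctls mentioned in the pinned/exclusive commit message, so a task can switch all of its counters off and on around a region of code. A minimal userspace sketch, assuming the prctl constants are exported by this tree's <linux/prctl.h>:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>        /* PR_TASK_PERF_COUNTERS_{DISABLE,ENABLE}: assumed to be defined here */

int main(void)
{
        /* Stop counting for the whole task. */
        if (prctl(PR_TASK_PERF_COUNTERS_DISABLE, 0, 0, 0, 0))
                perror("PR_TASK_PERF_COUNTERS_DISABLE");

        /* ... uncounted work ... */

        /* Re-enable; pinned groups that went into error state become readable again. */
        if (prctl(PR_TASK_PERF_COUNTERS_ENABLE, 0, 0, 0, 0))
                perror("PR_TASK_PERF_COUNTERS_ENABLE");

        return 0;
}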
|
|
|
|
|
2008-12-21 13:43:25 +00:00
|
|
|
/*
|
|
|
|
* Round-robin a context's counters:
|
|
|
|
*/
|
|
|
|
static void rotate_ctx(struct perf_counter_context *ctx)
|
2008-12-04 19:12:29 +00:00
|
|
|
{
|
|
|
|
struct perf_counter *counter;
|
2008-12-11 12:21:10 +00:00
|
|
|
u64 perf_flags;
|
2008-12-04 19:12:29 +00:00
|
|
|
|
2008-12-21 13:43:25 +00:00
|
|
|
if (!ctx->nr_counters)
|
2008-12-04 19:12:29 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
spin_lock(&ctx->lock);
|
|
|
|
/*
|
2008-12-11 07:38:42 +00:00
|
|
|
* Rotate the first entry last (works just fine for group counters too):
|
2008-12-04 19:12:29 +00:00
|
|
|
*/
|
2008-12-11 12:45:51 +00:00
|
|
|
perf_flags = hw_perf_save_disable();
|
2008-12-11 07:38:42 +00:00
|
|
|
list_for_each_entry(counter, &ctx->counter_list, list_entry) {
|
|
|
|
list_del(&counter->list_entry);
|
|
|
|
list_add_tail(&counter->list_entry, &ctx->counter_list);
|
2008-12-04 19:12:29 +00:00
|
|
|
break;
|
|
|
|
}
|
2008-12-11 12:45:51 +00:00
|
|
|
hw_perf_restore(perf_flags);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
|
|
spin_unlock(&ctx->lock);
|
2008-12-21 13:43:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void perf_counter_task_tick(struct task_struct *curr, int cpu)
|
|
|
|
{
|
|
|
|
struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
|
|
|
|
struct perf_counter_context *ctx = &curr->perf_counter_ctx;
|
|
|
|
const int rotate_percpu = 0;
|
|
|
|
|
|
|
|
if (rotate_percpu)
|
|
|
|
perf_counter_cpu_sched_out(cpuctx);
|
|
|
|
perf_counter_task_sched_out(curr, cpu);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
2008-12-21 13:43:25 +00:00
|
|
|
if (rotate_percpu)
|
|
|
|
rotate_ctx(&cpuctx->ctx);
|
|
|
|
rotate_ctx(ctx);
|
|
|
|
|
|
|
|
if (rotate_percpu)
|
|
|
|
perf_counter_cpu_sched_in(cpuctx, cpu);
|
2008-12-04 19:12:29 +00:00
|
|
|
perf_counter_task_sched_in(curr, cpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Cross CPU call to read the hardware counter
|
|
|
|
*/
|
2008-12-17 13:20:28 +00:00
|
|
|
static void __read(void *info)
|
2008-12-04 19:12:29 +00:00
|
|
|
{
|
2008-12-11 11:46:46 +00:00
|
|
|
struct perf_counter *counter = info;
|
2008-12-17 13:10:57 +00:00
|
|
|
unsigned long flags;
|
2008-12-11 11:46:46 +00:00
|
|
|
|
2008-12-17 13:10:57 +00:00
|
|
|
curr_rq_lock_irq_save(&flags);
|
2008-12-17 13:20:28 +00:00
|
|
|
counter->hw_ops->read(counter);
|
2008-12-17 13:10:57 +00:00
|
|
|
curr_rq_unlock_irq_restore(&flags);
|
2008-12-04 19:12:29 +00:00
|
|
|
}
|
|
|
|
|
2008-12-11 07:38:42 +00:00
|
|
|
static u64 perf_counter_read(struct perf_counter *counter)
|
2008-12-04 19:12:29 +00:00
|
|
|
{
|
|
|
|
/*
|
|
|
|
* If counter is enabled and currently active on a CPU, update the
|
|
|
|
* value in the counter structure:
|
|
|
|
*/
|
2008-12-11 14:17:03 +00:00
|
|
|
if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
|
2008-12-04 19:12:29 +00:00
|
|
|
smp_call_function_single(counter->oncpu,
|
2008-12-17 13:20:28 +00:00
|
|
|
__read, counter, 1);
|
2008-12-04 19:12:29 +00:00
|
|
|
}
|
|
|
|
|
2008-12-13 08:00:03 +00:00
|
|
|
return atomic64_read(&counter->count);
|
2008-12-04 19:12:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Cross CPU call to switch performance data pointers
|
|
|
|
*/
|
|
|
|
static void __perf_switch_irq_data(void *info)
|
|
|
|
{
|
|
|
|
struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
|
|
|
|
struct perf_counter *counter = info;
|
|
|
|
struct perf_counter_context *ctx = counter->ctx;
|
|
|
|
struct perf_data *oldirqdata = counter->irqdata;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If this is a task context, we need to check whether it is
|
|
|
|
* the current task context of this cpu. If not it has been
|
|
|
|
* scheduled out before the smp call arrived.
|
|
|
|
*/
|
|
|
|
if (ctx->task) {
|
|
|
|
if (cpuctx->task_ctx != ctx)
|
|
|
|
return;
|
|
|
|
spin_lock(&ctx->lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Change the pointer NMI safe */
|
|
|
|
atomic_long_set((atomic_long_t *)&counter->irqdata,
|
|
|
|
(unsigned long) counter->usrdata);
|
|
|
|
counter->usrdata = oldirqdata;
|
|
|
|
|
|
|
|
if (ctx->task)
|
|
|
|
spin_unlock(&ctx->lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct perf_data *perf_switch_irq_data(struct perf_counter *counter)
|
|
|
|
{
|
|
|
|
struct perf_counter_context *ctx = counter->ctx;
|
|
|
|
struct perf_data *oldirqdata = counter->irqdata;
|
|
|
|
struct task_struct *task = ctx->task;
|
|
|
|
|
|
|
|
if (!task) {
|
|
|
|
smp_call_function_single(counter->cpu,
|
|
|
|
__perf_switch_irq_data,
|
|
|
|
counter, 1);
|
|
|
|
return counter->usrdata;
|
|
|
|
}
|
|
|
|
|
|
|
|
retry:
|
|
|
|
spin_lock_irq(&ctx->lock);
|
2008-12-11 14:17:03 +00:00
|
|
|
if (counter->state != PERF_COUNTER_STATE_ACTIVE) {
|
2008-12-04 19:12:29 +00:00
|
|
|
counter->irqdata = counter->usrdata;
|
|
|
|
counter->usrdata = oldirqdata;
|
|
|
|
spin_unlock_irq(&ctx->lock);
|
|
|
|
return oldirqdata;
|
|
|
|
}
|
|
|
|
spin_unlock_irq(&ctx->lock);
|
|
|
|
task_oncpu_function_call(task, __perf_switch_irq_data, counter);
|
|
|
|
/* Might have failed, because task was scheduled out */
|
|
|
|
if (counter->irqdata == oldirqdata)
|
|
|
|
goto retry;
|
|
|
|
|
|
|
|
return counter->usrdata;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void put_context(struct perf_counter_context *ctx)
|
|
|
|
{
|
|
|
|
if (ctx->task)
|
|
|
|
put_task_struct(ctx->task);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
|
|
|
|
{
|
|
|
|
struct perf_cpu_context *cpuctx;
|
|
|
|
struct perf_counter_context *ctx;
|
|
|
|
struct task_struct *task;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If cpu is not a wildcard then this is a percpu counter:
|
|
|
|
*/
|
|
|
|
if (cpu != -1) {
|
|
|
|
/* Must be root to operate on a CPU counter: */
|
|
|
|
if (!capable(CAP_SYS_ADMIN))
|
|
|
|
return ERR_PTR(-EACCES);
|
|
|
|
|
|
|
|
if (cpu < 0 || cpu > num_possible_cpus())
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We could be clever and allow to attach a counter to an
|
|
|
|
* offline CPU and activate it when the CPU comes up, but
|
|
|
|
* that's for later.
|
|
|
|
*/
|
|
|
|
if (!cpu_isset(cpu, cpu_online_map))
|
|
|
|
return ERR_PTR(-ENODEV);
|
|
|
|
|
|
|
|
cpuctx = &per_cpu(perf_cpu_context, cpu);
|
|
|
|
ctx = &cpuctx->ctx;
|
|
|
|
|
|
|
|
return ctx;
|
|
|
|
}
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
if (!pid)
|
|
|
|
task = current;
|
|
|
|
else
|
|
|
|
task = find_task_by_vpid(pid);
|
|
|
|
if (task)
|
|
|
|
get_task_struct(task);
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
|
|
|
if (!task)
|
|
|
|
return ERR_PTR(-ESRCH);
|
|
|
|
|
|
|
|
ctx = &task->perf_counter_ctx;
|
|
|
|
ctx->task = task;
|
|
|
|
|
|
|
|
/* Reuse ptrace permission checks for now. */
|
|
|
|
if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
|
|
|
|
put_context(ctx);
|
|
|
|
return ERR_PTR(-EACCES);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ctx;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called when the last reference to the file is gone.
|
|
|
|
*/
|
|
|
|
static int perf_release(struct inode *inode, struct file *file)
|
|
|
|
{
|
|
|
|
struct perf_counter *counter = file->private_data;
|
|
|
|
struct perf_counter_context *ctx = counter->ctx;
|
|
|
|
|
|
|
|
file->private_data = NULL;
|
|
|
|
|
perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features
This primarily adds a way for perf_counter users to enable and disable
counters and groups. Enabling or disabling a counter or group also
enables or disables all of the child counters that have been cloned
from it to monitor children of the task monitored by the top-level
counter. The userspace interface to enable/disable counters is via
ioctl on the counter file descriptor.
Along the way this extends the code that handles child counters to
handle child counter groups properly. A group with multiple counters
will be cloned to child tasks if and only if the group leader has the
hw_event.inherit bit set - if it is set the whole group is cloned as a
group in the child task.
In order to be able to enable or disable all child counters of a given
top-level counter, we need a way to find them all. Hence I have added
a child_list field to struct perf_counter, which is the head of the
list of children for a top-level counter, or the link in that list for
a child counter. That list is protected by the perf_counter.mutex
field.
This also adds a mutex to the perf_counter_context struct. Previously
the list of counters was protected just by the lock field in the
context, which meant that perf_counter_init_task had to take that lock
and then take whatever lock/mutex protects the top-level counter's
child_list. But the counter enable/disable functions need to take
that lock in order to traverse the list, then for each counter take
the lock in that counter's context in order to change the counter's
state safely, which would lead to a deadlock.
To solve this, we now have both a mutex and a spinlock in the context,
and taking either is sufficient to ensure the list of counters can't
change - you have to take both before changing the list. Now
perf_counter_init_task takes the mutex instead of the lock (which
incidentally means that inherit_counter can use GFP_KERNEL instead of
GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new
enable/disable functions can take the mutex while traversing the list
of child counters without incurring a possible deadlock when the
counter manipulation code locks the context for a child counter.
We also had a misfeature that the first counter added to a context
would possibly not go on until the next sched-in, because we were
using ctx->nr_active to detect if the context was running on a CPU.
But nr_active is the number of active counters, and if that was zero
(because the context didn't have any counters yet) it would look like
the context wasn't running on a cpu and so the retry code in
__perf_install_in_context wouldn't retry. So this adds an 'is_active'
field that is set when the context is on a CPU, even if it has no
counters. The is_active field is only used for task contexts, not for
per-cpu contexts.
If we enable a subsidiary counter in a group that is active on a CPU,
and the arch code can't enable the counter, then we have to pull the
whole group off the CPU. We do this with group_sched_out, which gets
moved up in the file so it comes before all its callers. This also
adds similar logic to __perf_install_in_context so that the "all on,
or none" invariant of groups is preserved when adding a new counter to
a group.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-17 07:10:22 +00:00
|
|
|
mutex_lock(&ctx->mutex);
|
2008-12-04 19:12:29 +00:00
|
|
|
mutex_lock(&counter->mutex);
|
|
|
|
|
2008-12-11 07:38:42 +00:00
|
|
|
perf_counter_remove_from_context(counter);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
|
|
mutex_unlock(&counter->mutex);
|
perf_counter: Add counter enable/disable ioctls
2009-01-17 07:10:22 +00:00
|
|
|
mutex_unlock(&ctx->mutex);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
|
|
kfree(counter);
|
2009-02-11 09:53:37 +00:00
|
|
|
put_context(ctx);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
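The context locking rule from the enable/disable commit message above (either ctx->mutex or ctx->lock is enough to walk the counter list; both must be held to change it) can be made explicit with a small helper. This is a hypothetical sketch, not a function from this file:

/*
 * Hypothetical sketch of the ctx locking rule: hold both ctx->mutex
 * and ctx->lock to modify the counter list; either one alone is
 * sufficient for read-only traversal.
 */
static void ctx_add_counter_locked(struct perf_counter_context *ctx,
                                   struct perf_counter *counter)
{
        mutex_lock(&ctx->mutex);
        spin_lock_irq(&ctx->lock);

        list_add_tail(&counter->list_entry, &ctx->counter_list);
        ctx->nr_counters++;

        spin_unlock_irq(&ctx->lock);
        mutex_unlock(&ctx->mutex);
}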
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Read the performance counter - simple non blocking version for now
|
|
|
|
*/
|
|
|
|
static ssize_t
|
|
|
|
perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
|
|
|
|
{
|
|
|
|
u64 cntval;
|
|
|
|
|
|
|
|
if (count != sizeof(cntval))
|
|
|
|
return -EINVAL;
|
|
|
|
|
perf_counter: Add support for pinned and exclusive counter groups
2009-01-14 10:00:30 +00:00
|
|
|
/*
|
|
|
|
* Return end-of-file for a read on a counter that is in
|
|
|
|
* error state (i.e. because it was pinned but it couldn't be
|
|
|
|
* scheduled on to the CPU at some point).
|
|
|
|
*/
|
|
|
|
if (counter->state == PERF_COUNTER_STATE_ERROR)
|
|
|
|
return 0;
|
|
|
|
|
2008-12-04 19:12:29 +00:00
|
|
|
mutex_lock(&counter->mutex);
|
2008-12-11 07:38:42 +00:00
|
|
|
cntval = perf_counter_read(counter);
|
2008-12-04 19:12:29 +00:00
|
|
|
mutex_unlock(&counter->mutex);
|
|
|
|
|
|
|
|
return put_user(cntval, (u64 __user *) buf) ? -EFAULT : sizeof(cntval);
|
|
|
|
}
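From userspace, a PERF_RECORD_SIMPLE counter is read as a single u64; a zero-byte read signals that the counter is in error state (for example a pinned group that could not be scheduled). A minimal sketch, assuming fd is a counter file descriptor obtained from the perf counter syscall:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int read_counter(int fd, uint64_t *value)
{
        ssize_t n = read(fd, value, sizeof(*value));

        if (n == 0) {
                /* EOF: counter is in error state. */
                fprintf(stderr, "counter in error state\n");
                return -1;
        }
        return n == (ssize_t)sizeof(*value) ? 0 : -1;
}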
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
perf_copy_usrdata(struct perf_data *usrdata, char __user *buf, size_t count)
|
|
|
|
{
|
|
|
|
if (!usrdata->len)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
count = min(count, (size_t)usrdata->len);
|
|
|
|
if (copy_to_user(buf, usrdata->data + usrdata->rd_idx, count))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
/* Adjust the counters */
|
|
|
|
usrdata->len -= count;
|
|
|
|
if (!usrdata->len)
|
|
|
|
usrdata->rd_idx = 0;
|
|
|
|
else
|
|
|
|
usrdata->rd_idx += count;
|
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
perf_read_irq_data(struct perf_counter *counter,
|
|
|
|
char __user *buf,
|
|
|
|
size_t count,
|
|
|
|
int nonblocking)
|
|
|
|
{
|
|
|
|
struct perf_data *irqdata, *usrdata;
|
|
|
|
DECLARE_WAITQUEUE(wait, current);
|
perf_counter: Add support for pinned and exclusive counter groups
2009-01-14 10:00:30 +00:00
|
|
|
ssize_t res, res2;
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
|
|
irqdata = counter->irqdata;
|
|
|
|
usrdata = counter->usrdata;
|
|
|
|
|
|
|
|
if (usrdata->len + irqdata->len >= count)
|
|
|
|
goto read_pending;
|
|
|
|
|
|
|
|
if (nonblocking)
|
|
|
|
return -EAGAIN;
|
|
|
|
|
|
|
|
spin_lock_irq(&counter->waitq.lock);
|
|
|
|
__add_wait_queue(&counter->waitq, &wait);
|
|
|
|
for (;;) {
|
|
|
|
set_current_state(TASK_INTERRUPTIBLE);
|
|
|
|
if (usrdata->len + irqdata->len >= count)
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (signal_pending(current))
|
|
|
|
break;
|
|
|
|
|
perf_counter: Add support for pinned and exclusive counter groups
2009-01-14 10:00:30 +00:00
|
|
|
if (counter->state == PERF_COUNTER_STATE_ERROR)
|
|
|
|
break;
|
|
|
|
|
2008-12-04 19:12:29 +00:00
|
|
|
spin_unlock_irq(&counter->waitq.lock);
|
|
|
|
schedule();
|
|
|
|
spin_lock_irq(&counter->waitq.lock);
|
|
|
|
}
|
|
|
|
__remove_wait_queue(&counter->waitq, &wait);
|
|
|
|
__set_current_state(TASK_RUNNING);
|
|
|
|
spin_unlock_irq(&counter->waitq.lock);
|
|
|
|
|
perf_counter: Add support for pinned and exclusive counter groups
2009-01-14 10:00:30 +00:00
|
|
|
if (usrdata->len + irqdata->len < count &&
|
|
|
|
counter->state != PERF_COUNTER_STATE_ERROR)
|
2008-12-04 19:12:29 +00:00
|
|
|
return -ERESTARTSYS;
|
|
|
|
read_pending:
|
|
|
|
mutex_lock(&counter->mutex);
|
|
|
|
|
|
|
|
/* Drain pending data first: */
|
|
|
|
res = perf_copy_usrdata(usrdata, buf, count);
|
|
|
|
if (res < 0 || res == count)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
/* Switch irq buffer: */
|
|
|
|
usrdata = perf_switch_irq_data(counter);
|
perf_counter: Add support for pinned and exclusive counter groups
2009-01-14 10:00:30 +00:00
|
|
|
res2 = perf_copy_usrdata(usrdata, buf + res, count - res);
|
|
|
|
if (res2 < 0) {
|
2008-12-04 19:12:29 +00:00
|
|
|
if (!res)
|
|
|
|
res = -EFAULT;
|
|
|
|
} else {
|
perf_counter: Add support for pinned and exclusive counter groups
2009-01-14 10:00:30 +00:00
|
|
|
res += res2;
|
2008-12-04 19:12:29 +00:00
|
|
|
}
|
|
|
|
out:
|
|
|
|
mutex_unlock(&counter->mutex);
|
|
|
|
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
|
|
|
|
{
|
|
|
|
struct perf_counter *counter = file->private_data;
|
|
|
|
|
2008-12-10 11:33:23 +00:00
|
|
|
switch (counter->hw_event.record_type) {
|
2008-12-04 19:12:29 +00:00
|
|
|
case PERF_RECORD_SIMPLE:
|
|
|
|
return perf_read_hw(counter, buf, count);
|
|
|
|
|
|
|
|
case PERF_RECORD_IRQ:
|
|
|
|
case PERF_RECORD_GROUP:
|
|
|
|
return perf_read_irq_data(counter, buf, count,
|
|
|
|
file->f_flags & O_NONBLOCK);
|
|
|
|
}
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static unsigned int perf_poll(struct file *file, poll_table *wait)
|
|
|
|
{
|
|
|
|
struct perf_counter *counter = file->private_data;
|
|
|
|
unsigned int events = 0;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
poll_wait(file, &counter->waitq, wait);
|
|
|
|
|
|
|
|
spin_lock_irqsave(&counter->waitq.lock, flags);
|
|
|
|
if (counter->usrdata->len || counter->irqdata->len)
|
|
|
|
events |= POLLIN;
|
|
|
|
spin_unlock_irqrestore(&counter->waitq.lock, flags);
|
|
|
|
|
|
|
|
return events;
|
|
|
|
}
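Together with perf_read(), this lets a sampling counter be consumed with an ordinary poll()/read() loop. A minimal sketch, assuming fd was opened with an IRQ-driven record type (PERF_RECORD_IRQ) so that reads return buffered event data:

#include <poll.h>
#include <unistd.h>

static ssize_t wait_and_read(int fd, void *buf, size_t len)
{
        struct pollfd pfd = { .fd = fd, .events = POLLIN };

        /* Block until the counter's irqdata/usrdata has something for us. */
        if (poll(&pfd, 1, -1) < 0)
                return -1;
        return read(fd, buf, len);
}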
|
|
|
|
|
perf_counter: Add counter enable/disable ioctls
2009-01-17 07:10:22 +00:00
|
|
|
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
|
|
|
{
|
|
|
|
struct perf_counter *counter = file->private_data;
|
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
switch (cmd) {
|
|
|
|
case PERF_COUNTER_IOC_ENABLE:
|
|
|
|
perf_counter_enable_family(counter);
|
|
|
|
break;
|
|
|
|
case PERF_COUNTER_IOC_DISABLE:
|
|
|
|
perf_counter_disable_family(counter);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
err = -ENOTTY;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
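Userspace drives these ioctls directly on the counter file descriptor; enabling or disabling a counter also propagates to any inherited child counters, as described in the commit message above. A minimal sketch, with the ioctl numbers assumed to come from this tree's <linux/perf_counter.h>:

#include <sys/ioctl.h>
#include <linux/perf_counter.h>        /* PERF_COUNTER_IOC_{ENABLE,DISABLE}: assumed to be defined here */

static void pause_counter(int fd)
{
        ioctl(fd, PERF_COUNTER_IOC_DISABLE, 0);
}

static void resume_counter(int fd)
{
        ioctl(fd, PERF_COUNTER_IOC_ENABLE, 0);
}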
|
|
|
|
|
2008-12-04 19:12:29 +00:00
|
|
|
static const struct file_operations perf_fops = {
|
|
|
|
.release = perf_release,
|
|
|
|
.read = perf_read,
|
|
|
|
.poll = perf_poll,
|
perf_counter: Add counter enable/disable ioctls
2009-01-17 07:10:22 +00:00
|
|
|
.unlocked_ioctl = perf_ioctl,
|
|
|
|
.compat_ioctl = perf_ioctl,
|
2008-12-04 19:12:29 +00:00
|
|
|
};
|
|
|
|
|
2008-12-21 12:50:42 +00:00
|
|
|
static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
|
2008-12-11 12:21:10 +00:00
|
|
|
{
|
2009-01-09 05:26:43 +00:00
|
|
|
int cpu = raw_smp_processor_id();
|
|
|
|
|
|
|
|
atomic64_set(&counter->hw.prev_count, cpu_clock(cpu));
|
2008-12-21 12:50:42 +00:00
|
|
|
return 0;
|
2008-12-11 12:21:10 +00:00
|
|
|
}
|
|
|
|
|
2009-01-09 05:26:43 +00:00
|
|
|
static void cpu_clock_perf_counter_update(struct perf_counter *counter)
|
|
|
|
{
|
|
|
|
int cpu = raw_smp_processor_id();
|
|
|
|
s64 prev;
|
|
|
|
u64 now;
|
|
|
|
|
|
|
|
now = cpu_clock(cpu);
|
|
|
|
prev = atomic64_read(&counter->hw.prev_count);
|
|
|
|
atomic64_set(&counter->hw.prev_count, now);
|
|
|
|
atomic64_add(now - prev, &counter->count);
|
|
|
|
}
|
|
|
|
|
2008-12-11 12:21:10 +00:00
|
|
|
static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
|
|
|
|
{
|
2009-01-09 05:26:43 +00:00
|
|
|
cpu_clock_perf_counter_update(counter);
|
2008-12-11 12:21:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void cpu_clock_perf_counter_read(struct perf_counter *counter)
|
|
|
|
{
|
2009-01-09 05:26:43 +00:00
|
|
|
cpu_clock_perf_counter_update(counter);
|
2008-12-11 12:21:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
|
2008-12-17 13:20:28 +00:00
|
|
|
.enable = cpu_clock_perf_counter_enable,
|
|
|
|
.disable = cpu_clock_perf_counter_disable,
|
|
|
|
.read = cpu_clock_perf_counter_read,
|
2008-12-11 12:21:10 +00:00
|
|
|
};
|
|
|
|
|
2008-12-17 13:10:57 +00:00
|
|
|
/*
|
|
|
|
* Called from within the scheduler:
|
|
|
|
*/
|
|
|
|
static u64 task_clock_perf_counter_val(struct perf_counter *counter, int update)
|
2008-12-11 13:03:20 +00:00
|
|
|
{
|
2008-12-17 13:10:57 +00:00
|
|
|
struct task_struct *curr = counter->task;
|
|
|
|
u64 delta;
|
|
|
|
|
|
|
|
delta = __task_delta_exec(curr, update);
|
|
|
|
|
|
|
|
return curr->se.sum_exec_runtime + delta;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
|
|
|
|
{
|
|
|
|
u64 prev;
|
2008-12-14 11:22:31 +00:00
|
|
|
s64 delta;
|
|
|
|
|
|
|
|
prev = atomic64_read(&counter->hw.prev_count);
|
|
|
|
|
|
|
|
atomic64_set(&counter->hw.prev_count, now);
|
|
|
|
|
|
|
|
delta = now - prev;
|
|
|
|
|
|
|
|
atomic64_add(delta, &counter->count);
|
2008-12-11 13:03:20 +00:00
|
|
|
}
|
|
|
|
|
2008-12-14 11:22:31 +00:00
|
|
|
static void task_clock_perf_counter_read(struct perf_counter *counter)
|
2008-12-11 13:03:20 +00:00
|
|
|
{
|
2008-12-17 13:10:57 +00:00
|
|
|
u64 now = task_clock_perf_counter_val(counter, 1);
|
|
|
|
|
|
|
|
task_clock_perf_counter_update(counter, now);
|
2008-12-11 13:03:20 +00:00
|
|
|
}
|
|
|
|
|
2008-12-21 12:50:42 +00:00
|
|
|
static int task_clock_perf_counter_enable(struct perf_counter *counter)
|
2008-12-14 11:22:31 +00:00
|
|
|
{
|
perfcounters: make context switch and migration software counters work again
Jaswinder Singh Rajput reported that commit 23a185ca8abbeef caused the
context switch and migration software counters to always report zero.
With that commit, the software counters only count events that occur
between sched-in and sched-out for a task. This is necessary for the
counter enable/disable prctls and ioctls to work. However, the
context switch and migration counts are incremented after sched-out
for one task and before sched-in for the next. Since the increment
doesn't occur while a task is scheduled in (as far as the software
counters are concerned) it doesn't count towards any counter.
Thus the context switch and migration counters need to count events
that occur at any time, provided the counter is enabled, not just
those that occur while the task is scheduled in (from the perf_counter
subsystem's point of view). The problem though is that the software
counter code can't tell the difference between being enabled and being
scheduled in, and between being disabled and being scheduled out,
since we use the one pair of enable/disable entry points for both.
That is, the high-level disable operation simply arranges for the
counter to not be scheduled in any more, and the high-level enable
operation arranges for it to be scheduled in again.
One way to solve this would be to have sched_in/out operations in the
hw_perf_counter_ops struct as well as enable/disable. However, this
takes a simpler approach: it adds a 'prev_state' field to the
perf_counter struct that allows a counter's enable method to know
whether the counter was previously disabled or just inactive
(scheduled out), and therefore whether the enable method is being
called as a result of a high-level enable or a schedule-in operation.
This then allows the context switch, migration and page fault counters
to reset their hw.prev_count value in their enable functions only if
they are called as a result of a high-level enable operation.
Although page faults would normally only occur while the counter is
scheduled in, this changes the page fault counter code too in case
there are ever circumstances where page faults get counted against a
task while its counters are not scheduled in.
Reported-by: Jaswinder Singh Rajput <jaswinder@kernel.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-02-13 11:10:34 +00:00
|
|
|
if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
|
|
|
|
atomic64_set(&counter->hw.prev_count,
|
|
|
|
task_clock_perf_counter_val(counter, 0));
|
2008-12-21 12:50:42 +00:00
|
|
|
|
|
|
|
return 0;
|
2008-12-14 11:22:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void task_clock_perf_counter_disable(struct perf_counter *counter)
|
2008-12-11 13:03:20 +00:00
|
|
|
{
|
2008-12-17 13:10:57 +00:00
|
|
|
u64 now = task_clock_perf_counter_val(counter, 0);
|
|
|
|
|
|
|
|
task_clock_perf_counter_update(counter, now);
|
2008-12-11 13:03:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static const struct hw_perf_counter_ops perf_ops_task_clock = {
|
2008-12-17 13:20:28 +00:00
|
|
|
.enable = task_clock_perf_counter_enable,
|
|
|
|
.disable = task_clock_perf_counter_disable,
|
|
|
|
.read = task_clock_perf_counter_read,
|
2008-12-11 13:03:20 +00:00
|
|
|
};
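Which of the two clock implementations a counter ends up with depends on whether it has a task context: as the per-cpu software counter fix below explains, a task-clock counter opened as a per-cpu counter is redirected to the cpu-clock ops. A hypothetical sketch of that selection (the real decision is made in sw_perf_counter_init(), which is not shown here):

static const struct hw_perf_counter_ops *
pick_clock_ops(struct perf_counter *counter)
{
        /* Per-cpu counters have no task; fall back to the cpu clock. */
        if (!counter->ctx->task)
                return &perf_ops_cpu_clock;
        return &perf_ops_task_clock;
}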
|
|
|
|
|
perf_counters: make software counters work as per-cpu counters
Impact: kernel crash fix
Yanmin Zhang reported that using a PERF_COUNT_TASK_CLOCK software
counter as a per-cpu counter would reliably crash the system, because
it calls __task_delta_exec with a null pointer. The page fault,
context switch and cpu migration counters also won't function
correctly as per-cpu counters since they reference the current task.
This fixes the problem by redirecting the task_clock counter to the
cpu_clock counter when used as a per-cpu counter, and by implementing
per-cpu page fault, context switch and cpu migration counters.
Along the way, this:
- Initializes counter->ctx earlier, in perf_counter_alloc, so that
sw_perf_counter_init can use it
- Adds code to kernel/sched.c to count task migrations into each
cpu, in rq->nr_migrations_in
- Exports the per-cpu context switch and task migration counts
via new functions added to kernel/sched.c
- Makes sure that if sw_perf_counter_init fails, we don't try to
initialize the counter as a hardware counter. Since the user has
passed a negative, non-raw event type, they clearly don't intend
for it to be interpreted as a hardware event.
Reported-by: "Zhang Yanmin" <yanmin_zhang@linux.intel.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-02-09 11:42:47 +00:00
|
|
|
#ifdef CONFIG_VM_EVENT_COUNTERS
|
|
|
|
#define cpu_page_faults() __get_cpu_var(vm_event_states).event[PGFAULT]
|
|
|
|
#else
|
|
|
|
#define cpu_page_faults() 0
|
|
|
|
#endif
|
|
|
|
|
|
|
|
static u64 get_page_faults(struct perf_counter *counter)
|
2008-12-14 13:44:31 +00:00
|
|
|
{
|
perf_counters: make software counters work as per-cpu counters
2009-02-09 11:42:47 +00:00
|
|
|
struct task_struct *curr = counter->ctx->task;
|
2008-12-14 13:44:31 +00:00
|
|
|
|
perf_counters: make software counters work as per-cpu counters
2009-02-09 11:42:47 +00:00
|
|
|
if (curr)
|
|
|
|
return curr->maj_flt + curr->min_flt;
|
|
|
|
return cpu_page_faults();
|
2008-12-14 13:44:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void page_faults_perf_counter_update(struct perf_counter *counter)
|
|
|
|
{
|
|
|
|
u64 prev, now;
|
|
|
|
s64 delta;
|
|
|
|
|
|
|
|
prev = atomic64_read(&counter->hw.prev_count);
|
perf_counters: make software counters work as per-cpu counters
2009-02-09 11:42:47 +00:00
|
|
|
now = get_page_faults(counter);
|
2008-12-14 13:44:31 +00:00
|
|
|
|
|
|
|
atomic64_set(&counter->hw.prev_count, now);
|
|
|
|
|
|
|
|
delta = now - prev;
|
|
|
|
|
|
|
|
atomic64_add(delta, &counter->count);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void page_faults_perf_counter_read(struct perf_counter *counter)
|
|
|
|
{
|
|
|
|
page_faults_perf_counter_update(counter);
|
|
|
|
}
|
|
|
|
|
2008-12-21 12:50:42 +00:00
|
|
|
static int page_faults_perf_counter_enable(struct perf_counter *counter)
|
2008-12-14 13:44:31 +00:00
|
|
|
{
|
perfcounters: make context switch and migration software counters work again
Jaswinder Singh Rajput reported that commit 23a185ca8abbeef caused the
context switch and migration software counters to report zero always.
With that commit, the software counters only count events that occur
between sched-in and sched-out for a task. This is necessary for the
counter enable/disable prctls and ioctls to work. However, the
context switch and migration counts are incremented after sched-out
for one task and before sched-in for the next. Since the increment
doesn't occur while a task is scheduled in (as far as the software
counters are concerned) it doesn't count towards any counter.
Thus the context switch and migration counters need to count events
that occur at any time, provided the counter is enabled, not just
those that occur while the task is scheduled in (from the perf_counter
subsystem's point of view). The problem though is that the software
counter code can't tell the difference between being enabled and being
scheduled in, and between being disabled and being scheduled out,
since we use the one pair of enable/disable entry points for both.
That is, the high-level disable operation simply arranges for the
counter to not be scheduled in any more, and the high-level enable
operation arranges for it to be scheduled in again.
One way to solve this would be to have sched_in/out operations in the
hw_perf_counter_ops struct as well as enable/disable. However, this
takes a simpler approach: it adds a 'prev_state' field to the
perf_counter struct that allows a counter's enable method to know
whether the counter was previously disabled or just inactive
(scheduled out), and therefore whether the enable method is being
called as a result of a high-level enable or a schedule-in operation.
This then allows the context switch, migration and page fault counters
to reset their hw.prev_count value in their enable functions only if
they are called as a result of a high-level enable operation.
Although page faults would normally only occur while the counter is
scheduled in, this changes the page fault counter code too in case
there are ever circumstances where page faults get counted against a
task while its counters are not scheduled in.
Reported-by: Jaswinder Singh Rajput <jaswinder@kernel.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-02-13 11:10:34 +00:00
|
|
|
if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
|
|
|
|
atomic64_set(&counter->hw.prev_count, get_page_faults(counter));
|
2008-12-21 12:50:42 +00:00
|
|
|
return 0;
|
2008-12-14 13:44:31 +00:00
|
|
|
}
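/*
 * prev_count is re-based only when the counter was previously in the
 * OFF state (or below), i.e. on a real enable.  A plain schedule-in
 * keeps the old baseline, so events counted against the task while it
 * was scheduled out are not lost (see the prev_state commit message
 * above).
 */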
|
|
|
|
|
|
|
|
static void page_faults_perf_counter_disable(struct perf_counter *counter)
|
|
|
|
{
|
|
|
|
page_faults_perf_counter_update(counter);
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct hw_perf_counter_ops perf_ops_page_faults = {
|
2008-12-17 13:20:28 +00:00
|
|
|
.enable = page_faults_perf_counter_enable,
|
|
|
|
.disable = page_faults_perf_counter_disable,
|
|
|
|
.read = page_faults_perf_counter_read,
|
2008-12-14 13:44:31 +00:00
|
|
|
};
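/*
 * Each software counter exposes the same three operations: enable
 * re-bases prev_count when needed, while read and disable fold in the
 * latest delta via the corresponding *_update() helper.  The context
 * switch and cpu migration counters below reuse this structure.
 */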
|
|
|
|
|
2009-02-09 11:42:47 +00:00
|
|
|
static u64 get_context_switches(struct perf_counter *counter)
|
2008-12-14 11:28:33 +00:00
|
|
|
{
|
2009-02-09 11:42:47 +00:00
|
|
|
struct task_struct *curr = counter->ctx->task;
|
2008-12-14 11:28:33 +00:00
|
|
|
|
2009-02-09 11:42:47 +00:00
|
|
|
if (curr)
|
|
|
|
return curr->nvcsw + curr->nivcsw;
|
|
|
|
return cpu_nr_switches(smp_processor_id());
|
2008-12-14 11:28:33 +00:00
|
|
|
}
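/*
 * For a task counter the context switch count is the sum of voluntary
 * (nvcsw) and involuntary (nivcsw) switches accumulated in the task
 * struct; for a per-cpu counter it is the per-CPU total exported by
 * the scheduler via cpu_nr_switches().
 */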
|
|
|
|
|
|
|
|
static void context_switches_perf_counter_update(struct perf_counter *counter)
|
|
|
|
{
|
|
|
|
u64 prev, now;
|
|
|
|
s64 delta;
|
|
|
|
|
|
|
|
prev = atomic64_read(&counter->hw.prev_count);
|
2009-02-09 11:42:47 +00:00
|
|
|
now = get_context_switches(counter);
|
2008-12-14 11:28:33 +00:00
|
|
|
|
|
|
|
atomic64_set(&counter->hw.prev_count, now);
|
|
|
|
|
|
|
|
delta = now - prev;
|
|
|
|
|
|
|
|
atomic64_add(delta, &counter->count);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void context_switches_perf_counter_read(struct perf_counter *counter)
|
|
|
|
{
|
|
|
|
context_switches_perf_counter_update(counter);
|
|
|
|
}
|
|
|
|
|
2008-12-21 12:50:42 +00:00
|
|
|
static int context_switches_perf_counter_enable(struct perf_counter *counter)
|
2008-12-14 11:28:33 +00:00
|
|
|
{
|
2009-02-13 11:10:34 +00:00
|
|
|
if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
|
|
|
|
atomic64_set(&counter->hw.prev_count,
|
|
|
|
get_context_switches(counter));
|
2008-12-21 12:50:42 +00:00
|
|
|
return 0;
|
2008-12-14 11:28:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void context_switches_perf_counter_disable(struct perf_counter *counter)
|
|
|
|
{
|
|
|
|
context_switches_perf_counter_update(counter);
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct hw_perf_counter_ops perf_ops_context_switches = {
|
2008-12-17 13:20:28 +00:00
|
|
|
.enable = context_switches_perf_counter_enable,
|
|
|
|
.disable = context_switches_perf_counter_disable,
|
|
|
|
.read = context_switches_perf_counter_read,
|
2008-12-14 11:28:33 +00:00
|
|
|
};
|
|
|
|
|
2009-02-09 11:42:47 +00:00
|
|
|
static inline u64 get_cpu_migrations(struct perf_counter *counter)
|
2008-12-14 11:34:15 +00:00
|
|
|
{
|
2009-02-09 11:42:47 +00:00
|
|
|
struct task_struct *curr = counter->ctx->task;
|
|
|
|
|
|
|
|
if (curr)
|
|
|
|
return curr->se.nr_migrations;
|
|
|
|
return cpu_nr_migrations(smp_processor_id());
|
2008-12-14 11:34:15 +00:00
|
|
|
}
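/*
 * Task migrations are taken from the scheduler entity's cumulative
 * se.nr_migrations; the per-cpu variant uses cpu_nr_migrations(),
 * which reports migrations into the given CPU (rq->nr_migrations_in).
 */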
|
|
|
|
|
|
|
|
static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
|
|
|
|
{
|
|
|
|
u64 prev, now;
|
|
|
|
s64 delta;
|
|
|
|
|
|
|
|
prev = atomic64_read(&counter->hw.prev_count);
|
2009-02-09 11:42:47 +00:00
|
|
|
now = get_cpu_migrations(counter);
|
2008-12-14 11:34:15 +00:00
|
|
|
|
|
|
|
atomic64_set(&counter->hw.prev_count, now);
|
|
|
|
|
|
|
|
delta = now - prev;
|
|
|
|
|
|
|
|
atomic64_add(delta, &counter->count);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
|
|
|
|
{
|
|
|
|
cpu_migrations_perf_counter_update(counter);
|
|
|
|
}
|
|
|
|
|
2008-12-21 12:50:42 +00:00
|
|
|
static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
|
2008-12-14 11:34:15 +00:00
|
|
|
{
|
2009-02-13 11:10:34 +00:00
|
|
|
if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
|
|
|
|
atomic64_set(&counter->hw.prev_count,
|
|
|
|
get_cpu_migrations(counter));
|
2008-12-21 12:50:42 +00:00
|
|
|
return 0;
|
2008-12-14 11:34:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
|
|
|
|
{
|
|
|
|
cpu_migrations_perf_counter_update(counter);
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
|
2008-12-17 13:20:28 +00:00
|
|
|
.enable = cpu_migrations_perf_counter_enable,
|
|
|
|
.disable = cpu_migrations_perf_counter_disable,
|
|
|
|
.read = cpu_migrations_perf_counter_read,
|
2008-12-14 11:34:15 +00:00
|
|
|
};
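/*
 * sw_perf_counter_init() below maps a (negative, non-raw) event type
 * onto one of the software ops tables above, honouring the
 * exclude_user/exclude_kernel/exclude_hv bits where they can be
 * respected.
 */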
|
|
|
|
|
2008-12-11 12:21:10 +00:00
|
|
|
static const struct hw_perf_counter_ops *
|
|
|
|
sw_perf_counter_init(struct perf_counter *counter)
|
|
|
|
{
|
|
|
|
const struct hw_perf_counter_ops *hw_ops = NULL;
|
|
|
|
|
perf_counters: allow users to count user, kernel and/or hypervisor events
Impact: new perf_counter feature
This extends the perf_counter_hw_event struct with bits that specify
that events in user, kernel and/or hypervisor mode should not be
counted (i.e. should be excluded), and adds code to program the PMU
mode selection bits accordingly on x86 and powerpc.
For software counters, we don't currently have the infrastructure to
distinguish which mode an event occurs in, so we currently fail the
counter initialization if the setting of the hw_event.exclude_* bits
would require us to distinguish. Context switches and CPU migrations
are currently considered to occur in kernel mode.
On x86, this changes the previous policy that only root can count
kernel events. Now non-root users can count kernel events or exclude
them. Non-root users still can't use NMI events, though. On x86 we
don't appear to have any way to control whether hypervisor events are
counted or not, so hw_event.exclude_hv is ignored.
On powerpc, the selection of whether to count events in user, kernel
and/or hypervisor mode is PMU-wide, not per-counter, so this adds a
check that the hw_event.exclude_* settings are the same as other events
on the PMU. Counters being added to a group have to have the same
settings as the other hardware counters in the group. Counters and
groups can only be enabled in hw_perf_group_sched_in or power_perf_enable
if they have the same settings as any other counters already on the
PMU. If we are not running on a hypervisor, the exclude_hv setting
is ignored (by forcing it to 0) since we can't ever get any
hypervisor events.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-02-11 03:35:35 +00:00
|
|
|
/*
|
|
|
|
* Software counters (currently) can't in general distinguish
|
|
|
|
* between user, kernel and hypervisor events.
|
|
|
|
* However, context switches and cpu migrations are considered
|
|
|
|
* to be kernel events, and page faults are never hypervisor
|
|
|
|
* events.
|
|
|
|
*/
|
2008-12-11 12:21:10 +00:00
|
|
|
switch (counter->hw_event.type) {
|
|
|
|
case PERF_COUNT_CPU_CLOCK:
|
2009-02-11 03:35:35 +00:00
|
|
|
if (!(counter->hw_event.exclude_user ||
|
|
|
|
counter->hw_event.exclude_kernel ||
|
|
|
|
counter->hw_event.exclude_hv))
|
|
|
|
hw_ops = &perf_ops_cpu_clock;
|
2008-12-11 12:21:10 +00:00
|
|
|
break;
|
2008-12-11 13:03:20 +00:00
|
|
|
case PERF_COUNT_TASK_CLOCK:
|
2009-02-11 03:35:35 +00:00
|
|
|
if (counter->hw_event.exclude_user ||
|
|
|
|
counter->hw_event.exclude_kernel ||
|
|
|
|
counter->hw_event.exclude_hv)
|
|
|
|
break;
|
2009-02-09 11:42:47 +00:00
|
|
|
/*
|
|
|
|
* If the user instantiates this as a per-cpu counter,
|
|
|
|
* use the cpu_clock counter instead.
|
|
|
|
*/
|
|
|
|
if (counter->ctx->task)
|
|
|
|
hw_ops = &perf_ops_task_clock;
|
|
|
|
else
|
|
|
|
hw_ops = &perf_ops_cpu_clock;
|
2008-12-11 13:03:20 +00:00
|
|
|
break;
|
2008-12-14 13:44:31 +00:00
|
|
|
case PERF_COUNT_PAGE_FAULTS:
|
2009-02-11 03:35:35 +00:00
|
|
|
if (!(counter->hw_event.exclude_user ||
|
|
|
|
counter->hw_event.exclude_kernel))
|
|
|
|
hw_ops = &perf_ops_page_faults;
|
2008-12-14 13:44:31 +00:00
|
|
|
break;
|
2008-12-14 11:28:33 +00:00
|
|
|
case PERF_COUNT_CONTEXT_SWITCHES:
|
2009-02-11 03:35:35 +00:00
|
|
|
if (!counter->hw_event.exclude_kernel)
|
|
|
|
hw_ops = &perf_ops_context_switches;
|
2008-12-14 11:28:33 +00:00
|
|
|
break;
|
2008-12-14 11:34:15 +00:00
|
|
|
case PERF_COUNT_CPU_MIGRATIONS:
|
2009-02-11 03:35:35 +00:00
|
|
|
if (!counter->hw_event.exclude_kernel)
|
|
|
|
hw_ops = &perf_ops_cpu_migrations;
|
2008-12-14 11:34:15 +00:00
|
|
|
break;
|
2008-12-11 12:21:10 +00:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return hw_ops;
|
|
|
|
}
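/*
 * Illustrative example: a PERF_COUNT_CONTEXT_SWITCHES counter with
 * hw_event.exclude_kernel set gets no ops table here, so the switch
 * falls through with hw_ops == NULL and perf_counter_alloc() rejects
 * the counter instead of treating it as a hardware event.
 */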
|
|
|
|
|
2008-12-04 19:12:29 +00:00
|
|
|
/*
|
|
|
|
* Allocate and initialize a counter structure
|
|
|
|
*/
|
|
|
|
static struct perf_counter *
|
2008-12-11 07:38:42 +00:00
|
|
|
perf_counter_alloc(struct perf_counter_hw_event *hw_event,
|
|
|
|
int cpu,
|
2009-02-09 11:42:47 +00:00
|
|
|
struct perf_counter_context *ctx,
|
2008-12-12 12:49:45 +00:00
|
|
|
struct perf_counter *group_leader,
|
|
|
|
gfp_t gfpflags)
|
2008-12-04 19:12:29 +00:00
|
|
|
{
|
2008-12-11 12:21:10 +00:00
|
|
|
const struct hw_perf_counter_ops *hw_ops;
|
2008-12-11 11:46:46 +00:00
|
|
|
struct perf_counter *counter;
|
2008-12-04 19:12:29 +00:00
|
|
|
|
2008-12-12 12:49:45 +00:00
|
|
|
counter = kzalloc(sizeof(*counter), gfpflags);
|
2008-12-04 19:12:29 +00:00
|
|
|
if (!counter)
|
|
|
|
return NULL;
|
|
|
|
|
2008-12-11 07:38:42 +00:00
|
|
|
/*
|
|
|
|
* Single counters are their own group leaders, with an
|
|
|
|
* empty sibling list:
|
|
|
|
*/
|
|
|
|
if (!group_leader)
|
|
|
|
group_leader = counter;
|
|
|
|
|
2008-12-04 19:12:29 +00:00
|
|
|
mutex_init(&counter->mutex);
|
2008-12-11 07:38:42 +00:00
|
|
|
INIT_LIST_HEAD(&counter->list_entry);
|
|
|
|
INIT_LIST_HEAD(&counter->sibling_list);
|
2008-12-04 19:12:29 +00:00
|
|
|
init_waitqueue_head(&counter->waitq);
|
|
|
|
|
perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features
This primarily adds a way for perf_counter users to enable and disable
counters and groups. Enabling or disabling a counter or group also
enables or disables all of the child counters that have been cloned
from it to monitor children of the task monitored by the top-level
counter. The userspace interface to enable/disable counters is via
ioctl on the counter file descriptor.
Along the way this extends the code that handles child counters to
handle child counter groups properly. A group with multiple counters
will be cloned to child tasks if and only if the group leader has the
hw_event.inherit bit set - if it is set the whole group is cloned as a
group in the child task.
In order to be able to enable or disable all child counters of a given
top-level counter, we need a way to find them all. Hence I have added
a child_list field to struct perf_counter, which is the head of the
list of children for a top-level counter, or the link in that list for
a child counter. That list is protected by the perf_counter.mutex
field.
This also adds a mutex to the perf_counter_context struct. Previously
the list of counters was protected just by the lock field in the
context, which meant that perf_counter_init_task had to take that lock
and then take whatever lock/mutex protects the top-level counter's
child_list. But the counter enable/disable functions need to take
that lock in order to traverse the list, then for each counter take
the lock in that counter's context in order to change the counter's
state safely, which will lead to a deadlock.
To solve this, we now have both a mutex and a spinlock in the context,
and taking either is sufficient to ensure the list of counters can't
change - you have to take both before changing the list. Now
perf_counter_init_task takes the mutex instead of the lock (which
incidentally means that inherit_counter can use GFP_KERNEL instead of
GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new
enable/disable functions can take the mutex while traversing the list
of child counters without incurring a possible deadlock when the
counter manipulation code locks the context for a child counter.
We also had a misfeature that the first counter added to a context
would possibly not go on until the next sched-in, because we were
using ctx->nr_active to detect if the context was running on a CPU.
But nr_active is the number of active counters, and if that was zero
(because the context didn't have any counters yet) it would look like
the context wasn't running on a cpu and so the retry code in
__perf_install_in_context wouldn't retry. So this adds an 'is_active'
field that is set when the context is on a CPU, even if it has no
counters. The is_active field is only used for task contexts, not for
per-cpu contexts.
If we enable a subsidiary counter in a group that is active on a CPU,
and the arch code can't enable the counter, then we have to pull the
whole group off the CPU. We do this with group_sched_out, which gets
moved up in the file so it comes before all its callers. This also
adds similar logic to __perf_install_in_context so that the "all on,
or none" invariant of groups is preserved when adding a new counter to
a group.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-17 07:10:22 +00:00
|
|
|
INIT_LIST_HEAD(&counter->child_list);
|
|
|
|
|
2008-12-10 11:33:23 +00:00
|
|
|
counter->irqdata = &counter->data[0];
|
|
|
|
counter->usrdata = &counter->data[1];
|
|
|
|
counter->cpu = cpu;
|
|
|
|
counter->hw_event = *hw_event;
|
|
|
|
counter->wakeup_pending = 0;
|
2008-12-11 07:38:42 +00:00
|
|
|
counter->group_leader = group_leader;
|
2008-12-11 11:46:46 +00:00
|
|
|
counter->hw_ops = NULL;
|
2009-02-09 11:42:47 +00:00
|
|
|
counter->ctx = ctx;
|
2008-12-11 11:46:46 +00:00
|
|
|
|
2008-12-21 13:43:25 +00:00
|
|
|
counter->state = PERF_COUNTER_STATE_INACTIVE;
|
2008-12-16 23:43:10 +00:00
|
|
|
if (hw_event->disabled)
|
|
|
|
counter->state = PERF_COUNTER_STATE_OFF;
|
|
|
|
|
2008-12-11 12:21:10 +00:00
|
|
|
hw_ops = NULL;
|
|
|
|
if (!hw_event->raw && hw_event->type < 0)
|
|
|
|
hw_ops = sw_perf_counter_init(counter);
|
2009-02-09 11:42:47 +00:00
|
|
|
else
|
2008-12-11 12:21:10 +00:00
|
|
|
hw_ops = hw_perf_counter_init(counter);
|
|
|
|
|
2008-12-11 11:46:46 +00:00
|
|
|
if (!hw_ops) {
|
|
|
|
kfree(counter);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
counter->hw_ops = hw_ops;
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
|
|
return counter;
|
|
|
|
}
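/*
 * perf_counter_alloc() returns NULL both on allocation failure and
 * when neither the software nor the hardware init path supplies an
 * ops table; the caller turns that into -EINVAL.
 */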
|
|
|
|
|
|
|
|
/**
|
2009-03-04 09:36:51 +00:00
|
|
|
 * sys_perf_counter_open - open a performance counter, associate it with a task/cpu
|
2008-12-10 11:33:23 +00:00
|
|
|
*
|
|
|
|
* @hw_event_uptr: event type attributes for monitoring/sampling
|
2008-12-04 19:12:29 +00:00
|
|
|
* @pid: target pid
|
2008-12-10 11:33:23 +00:00
|
|
|
* @cpu: target cpu
|
|
|
|
* @group_fd: group leader counter fd
|
2008-12-04 19:12:29 +00:00
|
|
|
*/
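/*
 * Illustrative use from userspace (a sketch, not part of this file;
 * the syscall number macro is whatever the architecture exports for
 * perf_counter_open -- __NR_perf_counter_open is assumed here):
 *
 *	struct perf_counter_hw_event ev = { 0 };
 *	unsigned long long count;
 *	int fd;
 *
 *	ev.type = PERF_COUNT_CONTEXT_SWITCHES;
 *	fd = syscall(__NR_perf_counter_open, &ev, getpid(), -1, -1, 0);
 *	read(fd, &count, sizeof(count));
 *
 * pid/cpu select a task counter (cpu == -1) or a per-cpu counter
 * (pid == -1), group_fd == -1 starts a new group, and flags must
 * currently be 0.
 */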
|
2009-03-04 09:36:51 +00:00
|
|
|
SYSCALL_DEFINE5(perf_counter_open,
|
2009-02-26 11:43:46 +00:00
|
|
|
const struct perf_counter_hw_event __user *, hw_event_uptr,
|
2009-03-04 09:36:51 +00:00
|
|
|
pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
|
2008-12-04 19:12:29 +00:00
|
|
|
{
|
2008-12-11 07:38:42 +00:00
|
|
|
struct perf_counter *counter, *group_leader;
|
2008-12-10 11:33:23 +00:00
|
|
|
struct perf_counter_hw_event hw_event;
|
2008-12-11 07:38:42 +00:00
|
|
|
struct perf_counter_context *ctx;
|
2008-12-12 12:49:45 +00:00
|
|
|
struct file *counter_file = NULL;
|
2008-12-11 07:38:42 +00:00
|
|
|
struct file *group_file = NULL;
|
|
|
|
int fput_needed = 0;
|
2008-12-12 12:49:45 +00:00
|
|
|
int fput_needed2 = 0;
|
2008-12-04 19:12:29 +00:00
|
|
|
int ret;
|
|
|
|
|
2009-03-04 09:36:51 +00:00
|
|
|
/* for future expandability... */
|
|
|
|
if (flags)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2008-12-10 11:33:23 +00:00
|
|
|
if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
|
2008-12-08 18:26:59 +00:00
|
|
|
return -EFAULT;
|
|
|
|
|
2008-12-11 07:38:42 +00:00
|
|
|
/*
|
2008-12-11 10:26:29 +00:00
|
|
|
* Get the target context (task or percpu):
|
|
|
|
*/
|
|
|
|
ctx = find_get_context(pid, cpu);
|
|
|
|
if (IS_ERR(ctx))
|
|
|
|
return PTR_ERR(ctx);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Look up the group leader (we will attach this counter to it):
|
2008-12-11 07:38:42 +00:00
|
|
|
*/
|
|
|
|
group_leader = NULL;
|
|
|
|
if (group_fd != -1) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
group_file = fget_light(group_fd, &fput_needed);
|
|
|
|
if (!group_file)
|
2008-12-11 10:26:29 +00:00
|
|
|
goto err_put_context;
|
2008-12-11 07:38:42 +00:00
|
|
|
if (group_file->f_op != &perf_fops)
|
2008-12-11 10:26:29 +00:00
|
|
|
goto err_put_context;
|
2008-12-11 07:38:42 +00:00
|
|
|
|
|
|
|
group_leader = group_file->private_data;
|
|
|
|
/*
|
2008-12-11 10:26:29 +00:00
|
|
|
* Do not allow a recursive hierarchy (this new sibling
|
|
|
|
* becoming part of another group-sibling):
|
|
|
|
*/
|
|
|
|
if (group_leader->group_leader != group_leader)
|
|
|
|
goto err_put_context;
|
|
|
|
/*
|
|
|
|
* Do not allow attaching to a group in a different
|
|
|
|
* task or CPU context:
|
2008-12-11 07:38:42 +00:00
|
|
|
*/
|
2008-12-11 10:26:29 +00:00
|
|
|
if (group_leader->ctx != ctx)
|
|
|
|
goto err_put_context;
|
perf_counter: Add support for pinned and exclusive counter groups
Impact: New perf_counter features
A pinned counter group is one that the user wants to have on the CPU
whenever possible, i.e. whenever the associated task is running, for
a per-task group, or always for a per-cpu group. If the system
cannot satisfy that, it puts the group into an error state where
it is not scheduled any more and reads from it return EOF (i.e. 0
bytes read). The group can be released from error state and made
readable again using prctl(PR_TASK_PERF_COUNTERS_ENABLE). When we
have finer-grained enable/disable controls on counters we'll be able
to reset the error state on individual groups.
An exclusive group is one that the user wants to be the only group
using the CPU performance monitor hardware whenever it is on. The
counter group scheduler will not schedule an exclusive group if there
are already other groups on the CPU and will not schedule other groups
onto the CPU if there is an exclusive group scheduled (that statement
does not apply to groups containing only software counters, which can
always go on and which do not prevent an exclusive group from going on).
With an exclusive group, we will be able to let users program PMU
registers at a low level without the concern that those settings will
perturb other measurements.
Along the way this reorganizes things a little:
- is_software_counter() is moved to perf_counter.h.
- cpuctx->active_oncpu now records the number of hardware counters on
the CPU, i.e. it now excludes software counters. Nothing was reading
cpuctx->active_oncpu before, so this change is harmless.
- A new cpuctx->exclusive field records whether we currently have an
exclusive group on the CPU.
- counter_sched_out moves higher up in perf_counter.c and gets called
from __perf_counter_remove_from_context and __perf_counter_exit_task,
where we used to have essentially the same code.
- __perf_counter_sched_in now goes through the counter list twice, doing
the pinned counters in the first loop and the non-pinned counters in
the second loop, in order to give the pinned counters the best chance
to be scheduled in.
Note that only a group leader can be exclusive or pinned, and that
attribute applies to the whole group. This avoids some awkwardness in
some corner cases (e.g. where a group leader is closed and the other
group members get added to the context list). If we want to relax that
restriction later, we can, and it is easier to relax a restriction than
to apply a new one.
This doesn't yet handle the case where a pinned counter is inherited
and goes into error state in the child - the error state is not
propagated up to the parent when the child exits, and arguably it
should.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-14 10:00:30 +00:00
|
|
|
/*
|
|
|
|
* Only a group leader can be exclusive or pinned
|
|
|
|
*/
|
|
|
|
if (hw_event.exclusive || hw_event.pinned)
|
|
|
|
goto err_put_context;
|
2008-12-11 07:38:42 +00:00
|
|
|
}
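/*
 * At this point group_leader is either NULL (the new counter becomes
 * its own leader) or a validated leader: it really is a group leader,
 * it lives in the same task/CPU context, and the new sibling did not
 * ask to be exclusive or pinned.
 */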
|
|
|
|
|
2008-12-11 12:21:10 +00:00
|
|
|
ret = -EINVAL;
|
2009-02-09 11:42:47 +00:00
|
|
|
counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
|
|
|
|
GFP_KERNEL);
|
2008-12-04 19:12:29 +00:00
|
|
|
if (!counter)
|
|
|
|
goto err_put_context;
|
|
|
|
|
|
|
|
ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
|
|
|
|
if (ret < 0)
|
2008-12-12 12:49:45 +00:00
|
|
|
goto err_free_put_context;
|
|
|
|
|
|
|
|
counter_file = fget_light(ret, &fput_needed2);
|
|
|
|
if (!counter_file)
|
|
|
|
goto err_free_put_context;
|
|
|
|
|
|
|
|
counter->filp = counter_file;
|
2009-01-17 07:10:22 +00:00
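As an aside, a minimal userspace sketch of the interface described above, assuming a counter fd returned by sys_perf_counter_open() and the PERF_COUNTER_IOC_ENABLE/PERF_COUNTER_IOC_DISABLE ioctl requests exported by <linux/perf_counter.h> in this era; the toggle_counter() helper and its error handling are purely illustrative:

#include <sys/ioctl.h>
#include <linux/perf_counter.h>

/*
 * Illustrative helper (assumed names, not part of this file): enable or
 * disable one counter via its file descriptor.  Per the description above,
 * the operation also propagates to any child counters cloned from it.
 */
static int toggle_counter(int counter_fd, int enable)
{
        return ioctl(counter_fd, enable ? PERF_COUNTER_IOC_ENABLE
                                        : PERF_COUNTER_IOC_DISABLE, 0);
}
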
        mutex_lock(&ctx->mutex);
        perf_install_in_context(ctx, counter, cpu);
        mutex_unlock(&ctx->mutex);

        fput_light(counter_file, fput_needed2);

out_fput:
        fput_light(group_file, fput_needed);

        return ret;

err_free_put_context:
        kfree(counter);

err_put_context:
        put_context(ctx);

        goto out_fput;
}

/*
 * Initialize the perf_counter context in a task_struct:
 */
static void
__perf_counter_init_context(struct perf_counter_context *ctx,
                            struct task_struct *task)
{
        memset(ctx, 0, sizeof(*ctx));
        spin_lock_init(&ctx->lock);
        mutex_init(&ctx->mutex);
        INIT_LIST_HEAD(&ctx->counter_list);
        ctx->task = task;
}

/*
 * inherit a counter from parent task to child task:
 */
static struct perf_counter *
inherit_counter(struct perf_counter *parent_counter,
                struct task_struct *parent,
                struct perf_counter_context *parent_ctx,
                struct task_struct *child,
                struct perf_counter *group_leader,
                struct perf_counter_context *child_ctx)
{
        struct perf_counter *child_counter;

        /*
         * Instead of creating recursive hierarchies of counters,
         * we link inherited counters back to the original parent,
         * which has a filp for sure, which we use as the reference
         * count:
         */
        if (parent_counter->parent)
                parent_counter = parent_counter->parent;

        child_counter = perf_counter_alloc(&parent_counter->hw_event,
                                           parent_counter->cpu, child_ctx,
                                           group_leader, GFP_KERNEL);
        if (!child_counter)
                return NULL;

        /*
         * Link it up in the child's context:
         */
        child_counter->task = child;
        list_add_counter(child_counter, child_ctx);
        child_ctx->nr_counters++;

        child_counter->parent = parent_counter;
        /*
         * inherit into child's child as well:
         */
        child_counter->hw_event.inherit = 1;

        /*
         * Get a reference to the parent filp - we will fput it
         * when the child counter exits. This is safe to do because
         * we are in the parent and we know that the filp still
         * exists and has a nonzero count:
         */
        atomic_long_inc(&parent_counter->filp->f_count);

        /*
         * Link this into the parent counter's child list
         */
        mutex_lock(&parent_counter->mutex);
        list_add_tail(&child_counter->child_list, &parent_counter->child_list);

        /*
         * Make the child state follow the state of the parent counter,
         * not its hw_event.disabled bit. We hold the parent's mutex,
         * so we won't race with perf_counter_{en,dis}able_family.
         */
        if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
                child_counter->state = PERF_COUNTER_STATE_INACTIVE;
        else
                child_counter->state = PERF_COUNTER_STATE_OFF;

        mutex_unlock(&parent_counter->mutex);

        return child_counter;
}

static int inherit_group(struct perf_counter *parent_counter,
                struct task_struct *parent,
                struct perf_counter_context *parent_ctx,
                struct task_struct *child,
                struct perf_counter_context *child_ctx)
{
        struct perf_counter *leader;
        struct perf_counter *sub;

        leader = inherit_counter(parent_counter, parent, parent_ctx,
                                 child, NULL, child_ctx);
        if (!leader)
                return -ENOMEM;
        list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
                if (!inherit_counter(sub, parent, parent_ctx,
                                     child, leader, child_ctx))
                        return -ENOMEM;
        }
        return 0;
}

static void sync_child_counter(struct perf_counter *child_counter,
                               struct perf_counter *parent_counter)
{
        u64 parent_val, child_val;

        parent_val = atomic64_read(&parent_counter->count);
        child_val = atomic64_read(&child_counter->count);

        /*
         * Add back the child's count to the parent's count:
         */
        atomic64_add(child_val, &parent_counter->count);

        /*
         * Remove this counter from the parent's list
         */
        mutex_lock(&parent_counter->mutex);
        list_del_init(&child_counter->child_list);
        mutex_unlock(&parent_counter->mutex);

        /*
         * Release the parent counter, if this was the last
         * reference to it.
         */
        fput(parent_counter->filp);
}

static void
__perf_counter_exit_task(struct task_struct *child,
                         struct perf_counter *child_counter,
                         struct perf_counter_context *child_ctx)
{
        struct perf_counter *parent_counter;
        struct perf_counter *sub, *tmp;

        /*
         * If we do not self-reap then we have to wait for the
         * child task to unschedule (it will happen for sure),
         * so that its counter is at its final count. (This
         * condition triggers rarely - child tasks usually get
         * off their CPU before the parent has a chance to
         * get this far into the reaping action)
         */
        if (child != current) {
                wait_task_inactive(child, 0);
                list_del_init(&child_counter->list_entry);
        } else {
                struct perf_cpu_context *cpuctx;
                unsigned long flags;
                u64 perf_flags;

                /*
                 * Disable and unlink this counter.
                 *
                 * Be careful about zapping the list - IRQ/NMI context
                 * could still be processing it:
                 */
                curr_rq_lock_irq_save(&flags);
                perf_flags = hw_perf_save_disable();

                cpuctx = &__get_cpu_var(perf_cpu_context);

                group_sched_out(child_counter, cpuctx, child_ctx);

                list_del_init(&child_counter->list_entry);

                child_ctx->nr_counters--;

                hw_perf_restore(perf_flags);
                curr_rq_unlock_irq_restore(&flags);
        }

        parent_counter = child_counter->parent;
        /*
         * It can happen that parent exits first, and has counters
         * that are still around due to the child reference. These
         * counters need to be zapped - but otherwise linger.
         */
        if (parent_counter) {
                sync_child_counter(child_counter, parent_counter);
                list_for_each_entry_safe(sub, tmp, &child_counter->sibling_list,
                                         list_entry) {
                        if (sub->parent) {
                                sync_child_counter(sub, sub->parent);
                                kfree(sub);
                        }
                }
                kfree(child_counter);
        }
}

/*
perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features
This primarily adds a way for perf_counter users to enable and disable
counters and groups. Enabling or disabling a counter or group also
enables or disables all of the child counters that have been cloned
from it to monitor children of the task monitored by the top-level
counter. The userspace interface to enable/disable counters is via
ioctl on the counter file descriptor.
Along the way this extends the code that handles child counters to
handle child counter groups properly. A group with multiple counters
will be cloned to child tasks if and only if the group leader has the
hw_event.inherit bit set - if it is set the whole group is cloned as a
group in the child task.
In order to be able to enable or disable all child counters of a given
top-level counter, we need a way to find them all. Hence I have added
a child_list field to struct perf_counter, which is the head of the
list of children for a top-level counter, or the link in that list for
a child counter. That list is protected by the perf_counter.mutex
field.
This also adds a mutex to the perf_counter_context struct. Previously
the list of counters was protected just by the lock field in the
context, which meant that perf_counter_init_task had to take that lock
and then take whatever lock/mutex protects the top-level counter's
child_list. But the counter enable/disable functions need to take
that lock in order to traverse the list, then for each counter take
the lock in that counter's context in order to change the counter's
state safely, which will lead to a deadlock.
To solve this, we now have both a mutex and a spinlock in the context,
and taking either is sufficient to ensure the list of counters can't
change - you have to take both before changing the list. Now
perf_counter_init_task takes the mutex instead of the lock (which
incidentally means that inherit_counter can use GFP_KERNEL instead of
GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new
enable/disable functions can take the mutex while traversing the list
of child counters without incurring a possible deadlock when the
counter manipulation code locks the context for a child counter.
We also had an misfeature that the first counter added to a context
would possibly not go on until the next sched-in, because we were
using ctx->nr_active to detect if the context was running on a CPU.
But nr_active is the number of active counters, and if that was zero
(because the context didn't have any counters yet) it would look like
the context wasn't running on a cpu and so the retry code in
__perf_install_in_context wouldn't retry. So this adds an 'is_active'
field that is set when the context is on a CPU, even if it has no
counters. The is_active field is only used for task contexts, not for
per-cpu contexts.
If we enable a subsidiary counter in a group that is active on a CPU,
and the arch code can't enable the counter, then we have to pull the
whole group off the CPU. We do this with group_sched_out, which gets
moved up in the file so it comes before all its callers. This also
adds similar logic to __perf_install_in_context so that the "all on,
or none" invariant of groups is preserved when adding a new counter to
a group.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-17 07:10:22 +00:00
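For illustration of the ioctl interface described above, here is a minimal
userspace sketch (not part of this file; it assumes a counter file
descriptor already obtained from the perf_counter_open syscall, and the
PERF_COUNTER_IOC_ENABLE / PERF_COUNTER_IOC_DISABLE requests declared in
<linux/perf_counter.h>):

#include <sys/ioctl.h>
#include <linux/perf_counter.h>

/*
 * Toggle one top-level counter. The kernel walks the counter's
 * child_list and applies the same state change to every child counter
 * cloned into monitored child tasks.
 */
static int toggle_counter(int counter_fd, int enable)
{
	unsigned long req = enable ? PERF_COUNTER_IOC_ENABLE
				   : PERF_COUNTER_IOC_DISABLE;

	return ioctl(counter_fd, req, 0);
}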
|
|
|
* When a child task exits, feed back counter values to parent counters.
|
2008-12-12 12:49:45 +00:00
|
|
|
*
|
2009-01-17 07:10:22 +00:00
|
|
|
* Note: we may be running in child context, but the PID is not hashed
|
2008-12-12 12:49:45 +00:00
|
|
|
* anymore so new counters will not be added.
|
|
|
|
*/
|
|
|
|
void perf_counter_exit_task(struct task_struct *child)
|
|
|
|
{
|
|
|
|
struct perf_counter *child_counter, *tmp;
|
|
|
|
struct perf_counter_context *child_ctx;
|
|
|
|
|
|
|
|
child_ctx = &child->perf_counter_ctx;
|
|
|
|
|
|
|
|
if (likely(!child_ctx->nr_counters))
|
|
|
|
return;
|
|
|
|
|
|
|
|
list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
|
|
|
|
list_entry)
|
|
|
|
__perf_counter_exit_task(child, child_counter, child_ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize the perf_counter context in task_struct
|
|
|
|
*/
|
|
|
|
void perf_counter_init_task(struct task_struct *child)
|
|
|
|
{
|
|
|
|
struct perf_counter_context *child_ctx, *parent_ctx;
|
2009-01-17 07:10:22 +00:00
|
|
|
struct perf_counter *counter;
|
2008-12-12 12:49:45 +00:00
|
|
|
struct task_struct *parent = current;
|
|
|
|
|
|
|
|
child_ctx = &child->perf_counter_ctx;
|
|
|
|
parent_ctx = &parent->perf_counter_ctx;
|
|
|
|
|
|
|
|
__perf_counter_init_context(child_ctx, child);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is executed from the parent task context, so inherit
|
|
|
|
* counters that have been marked for cloning:
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (likely(!parent_ctx->nr_counters))
|
|
|
|
return;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Lock the parent list. No need to lock the child - not PID
|
|
|
|
* hashed yet and not running, so nobody can access it.
|
|
|
|
*/
|
2009-01-17 07:10:22 +00:00
|
|
|
mutex_lock(&parent_ctx->mutex);
|
2008-12-12 12:49:45 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We don't have to disable NMIs - we are only looking at
|
|
|
|
* the list, not manipulating it:
|
|
|
|
*/
|
|
|
|
list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) {
|
2009-01-17 07:10:22 +00:00
|
|
|
if (!counter->hw_event.inherit)
|
2008-12-12 12:49:45 +00:00
|
|
|
continue;
|
|
|
|
|
2009-01-17 07:10:22 +00:00
|
|
|
if (inherit_group(counter, parent,
|
2008-12-12 12:49:45 +00:00
|
|
|
parent_ctx, child, child_ctx))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2009-01-17 07:10:22 +00:00
|
|
|
mutex_unlock(&parent_ctx->mutex);
|
2008-12-12 12:49:45 +00:00
|
|
|
}
|
|
|
|
|
2008-12-11 07:38:42 +00:00
|
|
|
static void __cpuinit perf_counter_init_cpu(int cpu)
|
2008-12-04 19:12:29 +00:00
|
|
|
{
|
2008-12-11 07:38:42 +00:00
|
|
|
struct perf_cpu_context *cpuctx;
|
2008-12-04 19:12:29 +00:00
|
|
|
|
2008-12-11 07:38:42 +00:00
|
|
|
cpuctx = &per_cpu(perf_cpu_context, cpu);
|
|
|
|
__perf_counter_init_context(&cpuctx->ctx, NULL);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
|
|
mutex_lock(&perf_resource_mutex);
|
2008-12-11 07:38:42 +00:00
|
|
|
cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
|
2008-12-04 19:12:29 +00:00
|
|
|
mutex_unlock(&perf_resource_mutex);
|
2008-12-11 07:38:42 +00:00
|
|
|
|
2009-01-14 02:44:19 +00:00
|
|
|
hw_perf_counter_setup(cpu);
|
2008-12-04 19:12:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_HOTPLUG_CPU
|
2008-12-11 07:38:42 +00:00
|
|
|
static void __perf_counter_exit_cpu(void *info)
|
2008-12-04 19:12:29 +00:00
|
|
|
{
|
|
|
|
struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
|
|
|
|
struct perf_counter_context *ctx = &cpuctx->ctx;
|
|
|
|
struct perf_counter *counter, *tmp;
|
|
|
|
|
2008-12-11 07:38:42 +00:00
|
|
|
list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
|
|
|
|
__perf_counter_remove_from_context(counter);
|
2008-12-04 19:12:29 +00:00
|
|
|
}
|
2008-12-11 07:38:42 +00:00
|
|
|
static void perf_counter_exit_cpu(int cpu)
|
2008-12-04 19:12:29 +00:00
|
|
|
{
|
2009-01-17 07:10:22 +00:00
|
|
|
struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
|
|
|
|
struct perf_counter_context *ctx = &cpuctx->ctx;
|
|
|
|
|
|
|
|
mutex_lock(&ctx->mutex);
|
2008-12-11 07:38:42 +00:00
|
|
|
smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
|
2009-01-17 07:10:22 +00:00
|
|
|
mutex_unlock(&ctx->mutex);
|
2008-12-04 19:12:29 +00:00
|
|
|
}
|
|
|
|
#else
|
2008-12-11 07:38:42 +00:00
|
|
|
static inline void perf_counter_exit_cpu(int cpu) { }
|
2008-12-04 19:12:29 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
static int __cpuinit
|
|
|
|
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
|
|
|
|
{
|
|
|
|
unsigned int cpu = (long)hcpu;
|
|
|
|
|
|
|
|
switch (action) {
|
|
|
|
|
|
|
|
case CPU_UP_PREPARE:
|
|
|
|
case CPU_UP_PREPARE_FROZEN:
|
2008-12-11 07:38:42 +00:00
|
|
|
perf_counter_init_cpu(cpu);
|
2008-12-04 19:12:29 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case CPU_DOWN_PREPARE:
|
|
|
|
case CPU_DOWN_PREPARE_FROZEN:
|
2008-12-11 07:38:42 +00:00
|
|
|
perf_counter_exit_cpu(cpu);
|
2008-12-04 19:12:29 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NOTIFY_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct notifier_block __cpuinitdata perf_cpu_nb = {
|
|
|
|
.notifier_call = perf_cpu_notify,
|
|
|
|
};
|
|
|
|
|
|
|
|
static int __init perf_counter_init(void)
|
|
|
|
{
|
|
|
|
perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
|
|
|
|
(void *)(long)smp_processor_id());
|
|
|
|
register_cpu_notifier(&perf_cpu_nb);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
early_initcall(perf_counter_init);
|
|
|
|
|
|
|
|
static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
|
|
|
|
{
|
|
|
|
return sprintf(buf, "%d\n", perf_reserved_percpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
perf_set_reserve_percpu(struct sysdev_class *class,
|
|
|
|
const char *buf,
|
|
|
|
size_t count)
|
|
|
|
{
|
|
|
|
struct perf_cpu_context *cpuctx;
|
|
|
|
unsigned long val;
|
|
|
|
int err, cpu, mpt;
|
|
|
|
|
|
|
|
err = strict_strtoul(buf, 10, &val);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
if (val > perf_max_counters)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
mutex_lock(&perf_resource_mutex);
|
|
|
|
perf_reserved_percpu = val;
|
|
|
|
for_each_online_cpu(cpu) {
|
|
|
|
cpuctx = &per_cpu(perf_cpu_context, cpu);
|
|
|
|
spin_lock_irq(&cpuctx->ctx.lock);
|
|
|
|
mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
|
|
|
|
perf_max_counters - perf_reserved_percpu);
|
|
|
|
cpuctx->max_pertask = mpt;
|
|
|
|
spin_unlock_irq(&cpuctx->ctx.lock);
|
|
|
|
}
|
|
|
|
mutex_unlock(&perf_resource_mutex);
|
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
|
|
|
|
{
|
|
|
|
return sprintf(buf, "%d\n", perf_overcommit);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
|
|
|
|
{
|
|
|
|
unsigned long val;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = strict_strtoul(buf, 10, &val);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
if (val > 1)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
mutex_lock(&perf_resource_mutex);
|
|
|
|
perf_overcommit = val;
|
|
|
|
mutex_unlock(&perf_resource_mutex);
|
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
|
|
|
static SYSDEV_CLASS_ATTR(
|
|
|
|
reserve_percpu,
|
|
|
|
0644,
|
|
|
|
perf_show_reserve_percpu,
|
|
|
|
perf_set_reserve_percpu
|
|
|
|
);
|
|
|
|
|
|
|
|
static SYSDEV_CLASS_ATTR(
|
|
|
|
overcommit,
|
|
|
|
0644,
|
|
|
|
perf_show_overcommit,
|
|
|
|
perf_set_overcommit
|
|
|
|
);
|
|
|
|
|
|
|
|
static struct attribute *perfclass_attrs[] = {
|
|
|
|
&attr_reserve_percpu.attr,
|
|
|
|
&attr_overcommit.attr,
|
|
|
|
NULL
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct attribute_group perfclass_attr_group = {
|
|
|
|
.attrs = perfclass_attrs,
|
|
|
|
.name = "perf_counters",
|
|
|
|
};
|
|
|
|
|
|
|
|
static int __init perf_counter_sysfs_init(void)
|
|
|
|
{
|
|
|
|
return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
|
|
|
|
&perfclass_attr_group);
|
|
|
|
}
|
|
|
|
device_initcall(perf_counter_sysfs_init);
|
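The reserve_percpu and overcommit attributes registered above are the
sysadmin-facing knobs for the counter reservation policy. A minimal
userspace sketch of driving them (assuming the attribute group appears
under the cpu sysdev class, i.e. /sys/devices/system/cpu/perf_counters/):

#include <stdio.h>

/*
 * Reserve 'n' hardware counters per CPU for per-CPU (system-wide) use.
 * The kernel rejects values larger than perf_max_counters with -EINVAL.
 * Returns 0 on success, -1 if the attribute could not be written.
 */
static int set_reserve_percpu(int n)
{
	FILE *f = fopen("/sys/devices/system/cpu/perf_counters/reserve_percpu", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", n);
	return fclose(f) ? -1 : 0;
}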