/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include "asm/bug.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
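
/*
 * Count the fixed-size fields selected in sample_type (the bits covered
 * by PERF_SAMPLE_MASK); each selected field occupies one u64 in a sample.
 */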

int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}

void hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
}

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx = idx;
	evsel->attr = *attr;
	INIT_LIST_HEAD(&evsel->node);
	hists__init(&evsel->hists);
}

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}
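
/*
 * A minimal lifecycle sketch (illustrative only, not code from this file;
 * "threads" is assumed to be a struct thread_map built by the caller):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
 *
 *	if (evsel == NULL)
 *		return -ENOMEM;
 *
 *	if (perf_evsel__open_per_thread(evsel, threads, false, NULL) == 0) {
 *		__perf_evsel__read_on_cpu(evsel, 0, 0, true);
 *		perf_evsel__close(evsel, 1, threads->nr);
 *	}
 *	perf_evsel__delete(evsel);
 */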

void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
			struct perf_evsel *first)
{
	struct perf_event_attr *attr = &evsel->attr;
	int track = !evsel->idx; /* only the first counter needs these */

	attr->disabled = 1;
	attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
	attr->inherit = !opts->no_inherit;
	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING |
			    PERF_FORMAT_ID;

	attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	/*
	 * We default some events to a period of 1. But keep it a weak
	 * assumption, overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			attr->sample_type |= PERF_SAMPLE_PERIOD;
			attr->freq = 1;
			attr->sample_freq = opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		attr->sample_type |= PERF_SAMPLE_ADDR;
		attr->mmap_data = track;
	}

	if (opts->call_graph)
		attr->sample_type |= PERF_SAMPLE_CALLCHAIN;

	if (perf_target__has_cpu(&opts->target))
		attr->sample_type |= PERF_SAMPLE_CPU;

	if (opts->period)
		attr->sample_type |= PERF_SAMPLE_PERIOD;

	if (!opts->sample_id_all_missing &&
	    (opts->sample_time || !opts->no_inherit ||
	     perf_target__has_cpu(&opts->target)))
		attr->sample_type |= PERF_SAMPLE_TIME;

	if (opts->raw_samples) {
		attr->sample_type |= PERF_SAMPLE_TIME;
		attr->sample_type |= PERF_SAMPLE_RAW;
		attr->sample_type |= PERF_SAMPLE_CPU;
	}

	if (opts->no_delay) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack) {
		attr->sample_type |= PERF_SAMPLE_BRANCH_STACK;
		attr->branch_sample_type = opts->branch_stack;
	}

	attr->mmap = track;
	attr->comm = track;

	if (perf_target__none(&opts->target) &&
	    (!opts->group || evsel == first)) {
		attr->enable_on_exec = 1;
	}
}
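
/*
 * Allocate the ncpus x nthreads matrix of file descriptors and mark every
 * slot as not yet opened (-1).
 */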

int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}
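
/*
 * evsel->counts is one allocation: the struct itself (which carries the
 * aggregated value) followed by one perf_counts_values slot per cpu.
 */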

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);
	free(evsel->id);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->name);
	free(evsel);
}
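
/*
 * Read one counter value and, when scale is set, compensate for time the
 * event spent descheduled by the kernel's event multiplexing:
 *
 *	scaled = raw * time_enabled / time_running
 *
 * e.g. a raw count of 1000 from an event that ran for only half of its
 * enabled time is reported as 2000 (an estimate, not an exact count).
 */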

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}

int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}
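
/*
 * Open the event on every (cpu, thread) pair. With group set, the first
 * fd opened on each cpu becomes the group leader for that cpu's remaining
 * threads, unless group_fds already supplies one leader per cpu.
 */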

static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads, bool group,
			      struct xyarray *group_fds)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1, err;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		int group_fd = group_fds ? GROUP_FD(group_fds, cpu) : -1;

		for (thread = 0; thread < threads->nr; thread++) {

			if (!evsel->cgrp)
				pid = threads->map[thread];

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				goto out_close;
			}

			if (group && group_fd == -1)
				group_fd = FD(evsel, cpu, thread);
		}
	}

	return 0;

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return err;
}

void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
	evsel->fd = NULL;
}
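
/*
 * Dummy maps handed to __perf_evsel__open() when the caller does not
 * restrict cpus or threads; the single -1 entry is the "no restriction"
 * value sys_perf_event_open() expects.
 */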

static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr = 1,
	.cpus = { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr = 1,
	.threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads, bool group,
		     struct xyarray *group_fd)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads, group, group_fd);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group,
			     struct xyarray *group_fd)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group,
				  group_fd);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group,
				struct xyarray *group_fd)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group,
				  group_fd);
}
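
/*
 * Non-sample events carry the selected sample_id fields as a trailer at
 * the end of the record, so walk backwards from the last u64 of the event.
 */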

static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
				       struct perf_sample *sample)
{
	const u64 *array = event->sample.array;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		sample->cpu = *p;
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		sample->pid = p[0];
		sample->tid = p[1];
	}

	return 0;
}
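
/*
 * Check that "size" bytes at "offset" still lie within the event record,
 * so a malformed sample cannot make the parser read past header.size.
 */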

static bool sample_overlap(const union perf_event *event,
			   const void *offset, u64 size)
{
	const void *base = event;

	if (offset + size > base + event->header.size)
		return true;

	return false;
}

int perf_event__parse_sample(const union perf_event *event, u64 type,
			     int sample_size, bool sample_id_all,
			     struct perf_sample *data, bool swapped)
{
	const u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union {
		u64 val64;
		u32 val32[2];
	} u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = 1;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!sample_id_all)
			return 0;
		return perf_event__parse_id_sample(event, type, data);
	}

	array = event->sample.array;

	if (sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	data->addr = 0;
	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		if (sample_overlap(event, array, sizeof(data->callchain->nr)))
			return -EFAULT;

		data->callchain = (struct ip_callchain *)array;

		if (sample_overlap(event, array, data->callchain->nr))
			return -EFAULT;

		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		const u64 *pdata;

		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		if (sample_overlap(event, array, sizeof(u32)))
			return -EFAULT;

		data->raw_size = u.val32[0];
		pdata = (void *) array + sizeof(u32);

		if (sample_overlap(event, pdata, data->raw_size))
			return -EFAULT;

		data->raw_data = (void *) pdata;

		array = (void *)array + data->raw_size + sizeof(u32);
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		u64 sz;

		data->branch_stack = (struct branch_stack *)array;
		array++; /* nr */

		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		sz /= sizeof(u64);
		array += sz;
	}
	return 0;
}
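
/*
 * The inverse of perf_event__parse_sample() for the fixed-size fields:
 * write the sample back into the event's u64 array in the same order the
 * parser consumes it.
 */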

int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union {
		u64 val64;
		u32 val32[2];
	} u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		event->ip.ip = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_event__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_event__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	return 0;
}