/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include <linux/bitops.h>
#include <api/fs/debugfs.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <sys/resource.h>
#include "asm/bug.h"
#include "callchain.h"
#include "cgroup.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "perf_regs.h"
#include "debug.h"
#include "trace-event.h"
#include "stat.h"

static struct {
	bool sample_id_all;
	bool exclude_guest;
	bool mmap2;
	bool cloexec;
	bool clockid;
	bool clockid_wrong;
} perf_missing_features;

static clockid_t clockid;

static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)
{
	return 0;
}

static void perf_evsel__no_extra_fini(struct perf_evsel *evsel __maybe_unused)
{
}

static struct {
	size_t	size;
	int	(*init)(struct perf_evsel *evsel);
	void	(*fini)(struct perf_evsel *evsel);
} perf_evsel__object = {
	.size = sizeof(struct perf_evsel),
	.init = perf_evsel__no_extra_init,
	.fini = perf_evsel__no_extra_fini,
};

int perf_evsel__object_config(size_t object_size,
			      int (*init)(struct perf_evsel *evsel),
			      void (*fini)(struct perf_evsel *evsel))
{
	if (object_size == 0)
		goto set_methods;

	if (perf_evsel__object.size > object_size)
		return -EINVAL;

	perf_evsel__object.size = object_size;

set_methods:
	if (init != NULL)
		perf_evsel__object.init = init;

	if (fini != NULL)
		perf_evsel__object.fini = fini;

	return 0;
}
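
/*
 * Usage sketch (illustrative, not part of the original file): a tool that
 * embeds 'struct perf_evsel' at the start of a larger per-event structure
 * can register its size and callbacks once at startup:
 *
 *	struct my_evsel {
 *		struct perf_evsel evsel;	// must be the first member
 *		int		  priv;
 *	};
 *
 *	perf_evsel__object_config(sizeof(struct my_evsel),
 *				  my_evsel__init, my_evsel__fini);
 *
 * 'my_evsel' and the init/fini callbacks are hypothetical names.
 */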

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}
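
/*
 * Example (illustrative): sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME has three bits set below PERF_SAMPLE_MASK, so the
 * fixed-size part of each sample is 3 * sizeof(u64) = 24 bytes.
 */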

/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * sample_event.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}

/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}
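
/*
 * Example (illustrative): for sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_ID, id_pos is 2 (the id follows the ip and tid entries in a
 * sample) and is_pos is 1 (neither CPU nor STREAM_ID is set, so the id is
 * the last u64 appended to a non-sample event).
 */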

void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
}

void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->attr.sample_type & bit)) {
		evsel->attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->attr.sample_type & bit) {
		evsel->attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void perf_evsel__set_sample_id(struct perf_evsel *evsel,
			       bool can_sample_identifier)
{
	if (can_sample_identifier) {
		perf_evsel__reset_sample_bit(evsel, ID);
		perf_evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		perf_evsel__set_sample_bit(evsel, ID);
	}
	evsel->attr.read_format |= PERF_FORMAT_ID;
}

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx	= idx;
	evsel->tracking	= !idx;
	evsel->attr	= *attr;
	evsel->leader	= evsel;
	evsel->unit	= "";
	evsel->scale	= 1.0;
	INIT_LIST_HEAD(&evsel->node);
	perf_evsel__object.init(evsel);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
	perf_evsel__calc_id_pos(evsel);
	evsel->cmdline_group_boundary = false;
}

struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}

struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
{
	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);

	if (evsel != NULL) {
		struct perf_event_attr attr = {
			.type	     = PERF_TYPE_TRACEPOINT,
			.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = trace_event__tp_format(sys, name);
		if (evsel->tp_format == NULL)
			goto out_free;

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		perf_evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	zfree(&evsel->name);
	free(evsel);
	return NULL;
}
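
/*
 * Usage sketch (illustrative): creating a tracepoint evsel, e.g. via the
 * perf_evsel__newtp() wrapper declared in evsel.h (which passes idx == 0):
 *
 *	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
 *
 *	if (evsel == NULL)
 *		return -ENOMEM;
 */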

const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}

static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}

static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}
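
/*
 * Example (illustrative): a read+write breakpoint at address 0x1000 is
 * formatted as "mem:0x1000:rw"; an execute breakpoint as "mem:0x1000:x".
 */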

static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2",							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",		"bpc",	},
 { "node",								},
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",							},
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]	= (CACHE_READ),
 [C(BPU)]	= (CACHE_READ),
 [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}

static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)	/* >=: type == MAX would overrun the tables */
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}
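
/*
 * Example (illustrative): config = (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) |
 * (PERF_COUNT_HW_CACHE_OP_READ << 8) | PERF_COUNT_HW_CACHE_L1D decodes to
 * type L1D, op READ, result MISS and is formatted as
 * "L1-dcache-load-misses".
 */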

static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}

const char *perf_evsel__group_name(struct perf_evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}

int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
{
	int ret;
	struct perf_evsel *pos;
	const char *group_name = perf_evsel__group_name(evsel);

	ret = scnprintf(buf, size, "%s", group_name);

	ret += scnprintf(buf + ret, size - ret, " { %s",
			 perf_evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s",
				 perf_evsel__name(pos));

	ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}
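
/*
 * Example (illustrative): for a group recorded with
 * 'perf record -e "{cycles,instructions}"' and no explicit group name, the
 * leader's description is "anon group { cycles, instructions }".
 */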

static void
perf_evsel__config_callgraph(struct perf_evsel *evsel,
			     struct record_opts *opts)
{
	bool function = perf_evsel__is_function_event(evsel);
	struct perf_event_attr *attr = &evsel->attr;

	perf_evsel__set_sample_bit(evsel, CALLCHAIN);

	if (callchain_param.record_mode == CALLCHAIN_LBR) {
		if (!opts->branch_stack) {
			if (attr->exclude_user) {
				pr_warning("LBR callstack option is only available "
					   "to get user callchain information. "
					   "Falling back to framepointers.\n");
			} else {
				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
				attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
							   PERF_SAMPLE_BRANCH_CALL_STACK;
			}
		} else
			pr_warning("Cannot use LBR callstack with branch stack. "
				   "Falling back to framepointers.\n");
	}

	if (callchain_param.record_mode == CALLCHAIN_DWARF) {
		if (!function) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = callchain_param.dump_size;
			attr->exclude_callchain_user = 1;
		} else {
			pr_info("Cannot use DWARF unwind for function trace event,"
				" falling back to framepointers.\n");
		}
	}

	if (function) {
		pr_info("Disabling user space callchains for function trace event.\n");
		attr->exclude_callchain_user = 1;
	}
}

/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *    - all independent events and group leaders are disabled
 *    - all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *    - all independent events and group leaders have
 *      enable_on_exec set
 *    - we don't specifically enable or disable any event during
 *      the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *    - we specifically enable or disable all events during
 *      the record command
 *
 *     When attaching events to already running traced programs we
 *     enable/disable events specifically, as there's no
 *     initial traced exec call.
 */
void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
{
	struct perf_evsel *leader = evsel->leader;
	struct perf_event_attr *attr = &evsel->attr;
	int track = evsel->tracking;
	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	if (evsel->sample_read) {
		perf_evsel__set_sample_bit(evsel, READ);

		/*
		 * We need ID even in case of single event, because
		 * PERF_SAMPLE_READ processes ID-specific data.
		 */
		perf_evsel__set_sample_id(evsel, false);

		/*
		 * Apply group format only if we belong to a group
		 * with more than one member.
		 */
		if (leader->nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
			attr->inherit = 0;
		}
	}

	/*
	 * We default some events to have a default interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq	  = 1;
			attr->sample_freq = opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	/*
	 * Disable sampling for all group members other
	 * than leader in case leader 'leads' the sampling.
	 */
	if ((leader != evsel) && leader->sample_read) {
		attr->sample_freq   = 0;
		attr->sample_period = 0;
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	/*
	 * We don't allow user space callchains for function trace
	 * events, due to issues with page faults while tracing the page
	 * fault handler and its overall tricky nature.
	 */
	if (perf_evsel__is_function_event(evsel))
		evsel->attr.exclude_callchain_user = 1;

	if (callchain_param.enabled && !evsel->no_aux_samples)
		perf_evsel__config_callgraph(evsel, opts);

	if (opts->sample_intr_regs) {
		attr->sample_regs_intr = PERF_REGS_MASK;
		perf_evsel__set_sample_bit(evsel, REGS_INTR);
	}

	if (target__has_cpu(&opts->target))
		perf_evsel__set_sample_bit(evsel, CPU);

	if (opts->period)
		perf_evsel__set_sample_bit(evsel, PERIOD);

	/*
	 * When the user explicitly disabled time don't force it here.
	 */
	if (opts->sample_time &&
	    (!perf_missing_features.sample_id_all &&
	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
	     opts->sample_time_set)))
		perf_evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->sample_address)
		perf_evsel__set_sample_bit(evsel, DATA_SRC);

	if (opts->no_buffering) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	if (opts->sample_weight)
		perf_evsel__set_sample_bit(evsel, WEIGHT);

	attr->task  = track;
	attr->mmap  = track;
	attr->mmap2 = track && !perf_missing_features.mmap2;
	attr->comm  = track;

	if (opts->sample_transaction)
		perf_evsel__set_sample_bit(evsel, TRANSACTION);

	if (opts->running_time) {
		evsel->attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING;
	}

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for traced programs executed by perf.
	 */
	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
		!opts->initial_delay)
		attr->enable_on_exec = 1;

	if (evsel->immediate) {
		attr->disabled = 0;
		attr->enable_on_exec = 0;
	}

	clockid = opts->clockid;
	if (opts->use_clockid) {
		attr->use_clockid = 1;
		attr->clockid = opts->clockid;
	}
}
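
/*
 * Usage sketch (illustrative, assuming the evlist__for_each() iterator from
 * evlist.h): record-like tools call this once per evsel, after parsing the
 * events and before the fds are opened:
 *
 *	evlist__for_each(evlist, evsel)
 *		perf_evsel__config(evsel, opts);
 */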

static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
				 int ioc, void *arg)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			int fd = FD(evsel, cpu, thread),
			    err = ioctl(fd, ioc, arg);

			if (err)
				return err;
		}
	}

	return 0;
}

int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			     const char *filter)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter);
}

int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter)
{
	char *new_filter = strdup(filter);

	if (new_filter != NULL) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
}

int perf_evsel__append_filter(struct perf_evsel *evsel,
			      const char *op, const char *filter)
{
	char *new_filter;

	if (evsel->filter == NULL)
		return perf_evsel__set_filter(evsel, filter);

	if (asprintf(&new_filter, "(%s) %s (%s)", evsel->filter, op, filter) > 0) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
}
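
/*
 * Example (illustrative): perf_evsel__set_filter(evsel, "common_pid != 1")
 * followed by perf_evsel__append_filter(evsel, "&&", "id == 42") leaves
 * evsel->filter as "(common_pid != 1) && (id == 42)"; nothing reaches the
 * kernel until perf_evsel__apply_filter() issues the
 * PERF_EVENT_IOC_SET_FILTER ioctl on each fd.
 */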

int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_ENABLE,
				     0);
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	/*
	 * A zero-sized ids array is legitimate (e.g. when reading a
	 * header with no ids): don't rely on zalloc(0) returning a valid
	 * pointer (uClibc without MALLOC_GLIBC_COMPAT returns NULL),
	 * just skip the allocation.
	 */
	if (ncpus == 0 || nthreads == 0)
		return 0;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

static void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	perf_evsel__free_fd(evsel);
	perf_evsel__free_id(evsel);
	close_cgroup(evsel->cgrp);
	cpu_map__put(evsel->cpus);
	thread_map__put(evsel->threads);
	zfree(&evsel->group_name);
	zfree(&evsel->name);
	perf_evsel__object.fini(evsel);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	free(evsel);
}

void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
				struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	if (cpu == -1) {
		tmp = evsel->prev_raw_counts->aggr;
		evsel->prev_raw_counts->aggr = *count;
	} else {
		tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
		*perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
	}

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}

void perf_counts_values__scale(struct perf_counts_values *count,
			       bool scale, s8 *pscaled)
{
	s8 scaled = 0;

	if (scale) {
		if (count->run == 0) {
			scaled = -1;
			count->val = 0;
		} else if (count->run < count->ena) {
			scaled = 1;
			count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
		}
	} else
		count->ena = count->run = 0;

	if (pscaled)
		*pscaled = scaled;
}
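
/*
 * Example (illustrative): with val = 100, ena = 10 and run = 5 the counter
 * was scheduled in for half of the enabled time, so the scaled estimate is
 * 100 * 10 / 5 = 200 and *pscaled is set to 1; run == 0 zeroes val and
 * sets *pscaled to -1.
 */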

int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
		     struct perf_counts_values *count)
{
	memset(count, 0, sizeof(*count));

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (readn(FD(evsel, cpu, thread), count, sizeof(*count)) < 0)
		return -errno;

	return 0;
}

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	perf_evsel__compute_deltas(evsel, cpu, thread, &count);
	perf_counts_values__scale(&count, scale, NULL);
	*perf_counts(evsel->counts, cpu, thread) = count;
	return 0;
}

static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (perf_evsel__is_group_leader(evsel))
		return -1;

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	BUG_ON(!leader->fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}

struct bit_names {
	int bit;
	const char *name;
};

static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
{
	bool first_bit = true;
	int i = 0;

	do {
		if (value & bits[i].bit) {
			buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
			first_bit = false;
		}
	} while (bits[++i].name != NULL);
}

static void __p_sample_type(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
	struct bit_names bits[] = {
		bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
		bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
		bit_name(IDENTIFIER), bit_name(REGS_INTR),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}

static void __p_read_format(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
	struct bit_names bits[] = {
		bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
		bit_name(ID), bit_name(GROUP),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}

#define BUF_SIZE		1024

#define p_hex(val)		snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
#define p_unsigned(val)		snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
#define p_signed(val)		snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
#define p_sample_type(val)	__p_sample_type(buf, BUF_SIZE, val)
#define p_read_format(val)	__p_read_format(buf, BUF_SIZE, val)

#define PRINT_ATTRn(_n, _f, _p)				\
do {							\
	if (attr->_f) {					\
		_p(attr->_f);				\
		ret += attr__fprintf(fp, _n, buf, priv);\
	}						\
} while (0)

#define PRINT_ATTRf(_f, _p)	PRINT_ATTRn(#_f, _f, _p)
int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
|
|
|
|
attr__fprintf_f attr__fprintf, void *priv)
|
|
|
|
{
|
|
|
|
char buf[BUF_SIZE];
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
PRINT_ATTRf(type, p_unsigned);
|
|
|
|
PRINT_ATTRf(size, p_unsigned);
|
|
|
|
PRINT_ATTRf(config, p_hex);
|
|
|
|
PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
|
|
|
|
PRINT_ATTRf(sample_type, p_sample_type);
|
|
|
|
PRINT_ATTRf(read_format, p_read_format);
|
|
|
|
|
|
|
|
PRINT_ATTRf(disabled, p_unsigned);
|
|
|
|
PRINT_ATTRf(inherit, p_unsigned);
|
|
|
|
PRINT_ATTRf(pinned, p_unsigned);
|
|
|
|
PRINT_ATTRf(exclusive, p_unsigned);
|
|
|
|
PRINT_ATTRf(exclude_user, p_unsigned);
|
|
|
|
PRINT_ATTRf(exclude_kernel, p_unsigned);
|
|
|
|
PRINT_ATTRf(exclude_hv, p_unsigned);
|
|
|
|
PRINT_ATTRf(exclude_idle, p_unsigned);
|
|
|
|
PRINT_ATTRf(mmap, p_unsigned);
|
|
|
|
PRINT_ATTRf(comm, p_unsigned);
|
|
|
|
PRINT_ATTRf(freq, p_unsigned);
|
|
|
|
PRINT_ATTRf(inherit_stat, p_unsigned);
|
|
|
|
PRINT_ATTRf(enable_on_exec, p_unsigned);
|
|
|
|
PRINT_ATTRf(task, p_unsigned);
|
|
|
|
PRINT_ATTRf(watermark, p_unsigned);
|
|
|
|
PRINT_ATTRf(precise_ip, p_unsigned);
|
|
|
|
PRINT_ATTRf(mmap_data, p_unsigned);
|
|
|
|
PRINT_ATTRf(sample_id_all, p_unsigned);
|
|
|
|
PRINT_ATTRf(exclude_host, p_unsigned);
|
|
|
|
PRINT_ATTRf(exclude_guest, p_unsigned);
|
|
|
|
PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
|
|
|
|
PRINT_ATTRf(exclude_callchain_user, p_unsigned);
|
|
|
|
PRINT_ATTRf(mmap2, p_unsigned);
|
|
|
|
PRINT_ATTRf(comm_exec, p_unsigned);
|
|
|
|
PRINT_ATTRf(use_clockid, p_unsigned);
|
2015-07-21 09:44:03 +00:00
|
|
|
PRINT_ATTRf(context_switch, p_unsigned);
|
	PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
	PRINT_ATTRf(bp_type, p_unsigned);
	PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
	PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
	PRINT_ATTRf(sample_regs_user, p_hex);
	PRINT_ATTRf(sample_stack_user, p_unsigned);
	PRINT_ATTRf(clockid, p_signed);
	PRINT_ATTRf(sample_regs_intr, p_hex);
	PRINT_ATTRf(aux_watermark, p_unsigned);

	return ret;
}
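
Two C idioms carry this printer: the stringizing operator in PRINT_ATTRf (#_f turns a
field name into its own label) and the if (attr->_f) filter that skips zero fields,
which is what shrinks the "Post:" output above. A standalone sketch of the same
pattern, on a made-up three-field struct rather than the real perf_event_attr:

#include <stdio.h>
#include <stdint.h>

struct fake_attr {		/* stand-in for perf_event_attr */
	uint64_t type;
	uint64_t size;
	uint64_t disabled;
};

/* like PRINT_ATTRf: #_f stringizes the field name, zero fields are skipped */
#define PRINT_FIELD(attr, _f)						\
do {									\
	if ((attr)->_f)							\
		printf("  %-16s %llu\n", #_f,				\
		       (unsigned long long)(attr)->_f);			\
} while (0)

int main(void)
{
	struct fake_attr attr = { .type = 0, .size = 112, .disabled = 1 };

	PRINT_FIELD(&attr, type);	/* zero: prints nothing */
	PRINT_FIELD(&attr, size);
	PRINT_FIELD(&attr, disabled);
	return 0;
}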
static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __attribute__((unused)))
{
	return fprintf(fp, "  %-32s %s\n", name, val);
}

perf tools: Enable grouping logic for parsed events
This patch adds functionality to create event groups based on the way
they are specified on the command line, extending the '{}' group
syntax introduced in an earlier patch.
The current '--group/-g' option behaviour remains intact. If you
specify it for record/stat/top command, all the specified events
become members of a single group with the first event as a group
leader.
With the new '{}' group syntax you can create group like:
# perf record -e '{cycles,faults}' ls
resulting in single event group containing 'cycles' and 'faults'
events, with cycles event as group leader.
All groups are created with regard to threads and cpus. Thus
recording an event group within 2 threads on a server with
4 CPUs will create 8 separate groups.
Examples (first event in brackets is group leader):
# 1 group (cpu-clock,task-clock)
perf record --group -e cpu-clock,task-clock ls
perf record -e '{cpu-clock,task-clock}' ls
# 2 groups (cpu-clock,task-clock) (minor-faults,major-faults)
perf record -e '{cpu-clock,task-clock},{minor-faults,major-faults}' ls
# 1 group (cpu-clock,task-clock,minor-faults,major-faults)
perf record --group -e cpu-clock,task-clock -e minor-faults,major-faults ls
perf record -e '{cpu-clock,task-clock,minor-faults,major-faults}' ls
# 2 groups (cpu-clock,task-clock) (minor-faults,major-faults)
perf record -e '{cpu-clock,task-clock} -e '{minor-faults,major-faults}' \
-e instructions ls
# 1 group
# (cpu-clock,task-clock,minor-faults,major-faults,instructions)
perf record --group -e cpu-clock,task-clock \
-e minor-faults,major-faults -e instructions ls perf record -e
'{cpu-clock,task-clock,minor-faults,major-faults,instructions}' ls
It's possible to use standard event modifier for a group, which spans
over all events in the group and updates each event modifier settings,
for example:
# perf record -r '{faults:k,cache-references}:p'
resulting in ':kp' modifier being used for 'faults' and ':p' modifier
being used for 'cache-references' event.
Reviewed-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ulrich Drepper <drepper@gmail.com>
Link: http://lkml.kernel.org/n/tip-ho42u0wcr8mn1otkalqi13qp@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2012-08-08 10:22:36 +00:00
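
The leader/member relationship described above is established at open time: the
leader is opened with group_fd == -1 and every member passes the leader's fd,
which is what get_group_fd() in the function below computes per cpu/thread. A
minimal sketch against the raw syscall (the perf_event_open() here is our own
wrapper; glibc provides none):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int leader, member;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* group leader: group_fd == -1 */
	leader = perf_event_open(&attr, 0, -1, -1, 0);

	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	/* group member: pass the leader's fd */
	member = perf_event_open(&attr, 0, -1, leader, 0);

	if (leader < 0 || member < 0)
		perror("perf_event_open");
	else
		puts("cycles+instructions opened as one group");

	close(member);
	close(leader);
	return 0;
}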

static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)
{
	int cpu, thread, nthreads;
	unsigned long flags = PERF_FLAG_FD_CLOEXEC;
	int pid = -1, err;
	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;

	if (evsel->system_wide)
		nthreads = 1;
	else
		nthreads = threads->nr;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)
		return -ENOMEM;

perf tool: Add cgroup support
This patch adds the ability to filter monitoring based on container groups
(cgroups) for both perf stat and perf record. It is possible to monitor
multiple cgroups in parallel. There is one cgroup per event. The cgroups to
monitor are passed via a new -G option followed by a comma-separated list of
cgroup names.
The cgroup filesystem has to be mounted. Given a cgroup name, the perf tool
finds the corresponding directory in the cgroup filesystem and opens it. It
then passes that file descriptor to the kernel.
Example:
$ perf stat -B -a -e cycles:u,cycles:u,cycles:u -G test1,,test2 -- sleep 1
Performance counter stats for 'sleep 1':
2,368,667,414 cycles test1
2,369,661,459 cycles
<not counted> cycles test2
1.001856890 seconds time elapsed
Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <4d590290.825bdf0a.7d0a.4890@mx.google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2011-02-14 09:20:01 +00:00
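
So the kernel-facing contract is: open the cgroup directory, pass that fd in the
pid slot together with PERF_FLAG_PID_CGROUP, and pin the event to a CPU (cgroup
events are per-cpu). A minimal sketch; the cgroup path is a placeholder for
wherever the perf_event controller is mounted on your system:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	int cgrp_fd, ev_fd;

	/* hypothetical controller mount point and cgroup name */
	cgrp_fd = open("/sys/fs/cgroup/perf_event/test1", O_RDONLY);
	if (cgrp_fd < 0) {
		perror("open cgroup");
		return 1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* pid slot carries the cgroup fd; cpu must be >= 0 in cgroup mode */
	ev_fd = syscall(__NR_perf_event_open, &attr, cgrp_fd, 0,
			-1, PERF_FLAG_PID_CGROUP);
	if (ev_fd < 0)
		perror("perf_event_open");
	else
		close(ev_fd);

	close(cgrp_fd);
	return 0;
}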
	if (evsel->cgrp) {
		flags |= PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

fallback_missing_features:
	if (perf_missing_features.clockid_wrong)
		evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */
	if (perf_missing_features.clockid) {
		evsel->attr.use_clockid = 0;
		evsel->attr.clockid = 0;
	}
	if (perf_missing_features.cloexec)
		flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
	if (perf_missing_features.mmap2)
		evsel->attr.mmap2 = 0;
	if (perf_missing_features.exclude_guest)
		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
retry_sample_id:
	if (perf_missing_features.sample_id_all)
		evsel->attr.sample_id_all = 0;

	if (verbose >= 2) {
		fprintf(stderr, "%.60s\n", graph_dotted_line);
		fprintf(stderr, "perf_event_attr:\n");
		perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
		fprintf(stderr, "%.60s\n", graph_dotted_line);
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < nthreads; thread++) {
			int group_fd;

			if (!evsel->cgrp && !evsel->system_wide)
				pid = thread_map__pid(threads, thread);

			group_fd = get_group_fd(evsel, cpu, thread);
retry_open:
			pr_debug2("sys_perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx\n",
				  pid, cpus->map[cpu], group_fd, flags);

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				pr_debug2("sys_perf_event_open failed, error %d\n",
					  err);
				goto try_fallback;
			}
			set_rlimit = NO_CHANGE;

			/*
			 * If we succeeded but had to kill clockid, fail and
			 * have perf_evsel__open_strerror() print us a nice
			 * error.
			 */
			if (perf_missing_features.clockid ||
			    perf_missing_features.clockid_wrong) {
				err = -EINVAL;
				goto out_close;
			}
		}
	}

	return 0;

try_fallback:
	/*
	 * perf stat needs between 5 and 22 fds per CPU. When we run out
	 * of them try to increase the limits.
	 */
	if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
		struct rlimit l;
		int old_errno = errno;

		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
			if (set_rlimit == NO_CHANGE)
				l.rlim_cur = l.rlim_max;
			else {
				l.rlim_cur = l.rlim_max + 1000;
				l.rlim_max = l.rlim_cur;
			}
			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
				set_rlimit++;
				errno = old_errno;
				goto retry_open;
			}
		}
		errno = old_errno;
	}

	if (err != -EINVAL || cpu > 0 || thread > 0)
		goto out_close;

	/*
	 * Must probe features in the order they were added to the
	 * perf_event_attr interface.
	 */
	if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
		perf_missing_features.clockid_wrong = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.clockid && evsel->attr.use_clockid) {
		perf_missing_features.clockid = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
		perf_missing_features.cloexec = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
		perf_missing_features.mmap2 = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.exclude_guest &&
		   (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
		perf_missing_features.exclude_guest = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.sample_id_all) {
		perf_missing_features.sample_id_all = true;
		goto retry_sample_id;
	}

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = nthreads;
	} while (--cpu >= 0);
	return err;
}
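
The EMFILE fallback in try_fallback escalates in two steps: first raise the soft
RLIMIT_NOFILE to the hard limit, then try to grow the hard limit itself, which
only succeeds with CAP_SYS_RESOURCE. The same two-step bump in isolation, as a
sketch:

#include <stdio.h>
#include <sys/resource.h>

/* pass 0: soft -> hard; pass 1: try to grow the hard limit too */
static int bump_nofile(int pass)
{
	struct rlimit l;

	if (getrlimit(RLIMIT_NOFILE, &l) != 0)
		return -1;

	if (pass == 0)
		l.rlim_cur = l.rlim_max;
	else {
		l.rlim_cur = l.rlim_max + 1000;
		l.rlim_max = l.rlim_cur;	/* needs CAP_SYS_RESOURCE */
	}
	return setrlimit(RLIMIT_NOFILE, &l);
}

int main(void)
{
	if (bump_nofile(0) == 0)
		puts("soft NOFILE limit raised to hard limit");
	if (bump_nofile(1) != 0)
		perror("raising hard limit (expected to fail unprivileged)");
	return 0;
}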

void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
}

static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}

static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
		array--;
	}

	return 0;
}

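The u64_swap union above handles the one awkward case in cross-endian parsing: a
u64 slot that really holds two u32s (pid/tid, cpu/res). The record-level fixup
has already byte-reversed the whole u64, so the parser undoes that and then
swaps each half on its own. A standalone sketch of the round trip (the union
name matches the perf one, but this is only an illustration):

#include <stdio.h>
#include <stdint.h>
#include <byteswap.h>

union u64_swap {
	uint64_t val64;
	uint32_t val32[2];
};

int main(void)
{
	union u64_swap u;

	/* bytes as a foreign-endian writer laid them out */
	u.val32[0] = bswap_32(0x1234);	/* pid */
	u.val32[1] = bswap_32(0x5678);	/* tid */
	/* the record-level fixup blindly swaps the whole u64 */
	u.val64 = bswap_64(u.val64);

	/* the parser: undo the u64 swap, then swap each u32 on its own */
	u.val64 = bswap_64(u.val64);
	u.val32[0] = bswap_32(u.val32[0]);
	u.val32[1] = bswap_32(u.val32[1]);

	printf("pid %#x  tid %#x\n", u.val32[0], u.val32[1]);	/* 0x1234 0x5678 */
	return 0;
}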

static inline bool overflow(const void *endp, u16 max_size, const void *offset,
			    u64 size)
{
	return size > max_size || offset + size > endp;
}

#define OVERFLOW_CHECK(offset, size, max_size)				\
	do {								\
		if (overflow(endp, (max_size), (offset), (size)))	\
			return -EFAULT;					\
	} while (0)

#define OVERFLOW_CHECK_u64(offset) \
	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))

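endp is computed once from the event header and every variable-length hop is
checked against it before being trusted, so a corrupt size field fails the parse
instead of walking off the record. The same guard on a toy length-prefixed
buffer, as a sketch:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* refuse any chunk whose claimed size would run past endp,
 * the same test OVERFLOW_CHECK wraps around overflow() */
static int parse(const uint8_t *buf, size_t len)
{
	const uint8_t *p = buf, *endp = buf + len;
	uint64_t sz;

	while (p + sizeof(sz) <= endp) {
		memcpy(&sz, p, sizeof(sz));
		p += sizeof(sz);
		if (sz > len || p + sz > endp)
			return -1;
		printf("chunk of %llu bytes\n", (unsigned long long)sz);
		p += sz;
	}
	return 0;
}

int main(void)
{
	uint8_t buf[24] = { 0 };
	uint64_t sz = 4;		/* a valid 4-byte chunk... */

	memcpy(buf, &sz, sizeof(sz));
	sz = 1 << 20;			/* ...then one that lies about its size */
	memcpy(buf + 12, &sz, sizeof(sz));

	if (parse(buf, sizeof(buf)) < 0)
		puts("rejected: chunk would overflow the buffer");
	return 0;
}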
2012-08-02 15:23:46 +00:00
|
|
|
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
|
2012-09-26 15:48:18 +00:00
|
|
|
struct perf_sample *data)
|
2011-01-21 15:46:41 +00:00
|
|
|
{
|
2012-08-02 15:23:46 +00:00
|
|
|
u64 type = evsel->attr.sample_type;
|
2012-09-26 15:48:18 +00:00
|
|
|
bool swapped = evsel->needs_swap;
|
2011-01-21 15:46:41 +00:00
|
|
|
const u64 *array;
|
2013-08-27 08:23:04 +00:00
|
|
|
u16 max_size = event->header.size;
|
|
|
|
const void *endp = (void *)event + max_size;
|
|
|
|
u64 sz;
|
2011-01-21 15:46:41 +00:00
|
|
|
|
2011-09-06 15:12:26 +00:00
|
|
|
/*
|
|
|
|
* used for cross-endian analysis. See git commit 65014ab3
|
|
|
|
* for why this goofiness is needed.
|
|
|
|
*/
|
2012-05-16 06:59:04 +00:00
|
|
|
union u64_swap u;
|
2011-09-06 15:12:26 +00:00
|
|
|
|
2011-12-15 16:32:39 +00:00
|
|
|
memset(data, 0, sizeof(*data));
|
2011-01-21 15:46:41 +00:00
|
|
|
data->cpu = data->pid = data->tid = -1;
|
|
|
|
data->stream_id = data->id = data->time = -1ULL;
|
2014-02-03 11:44:41 +00:00
|
|
|
data->period = evsel->attr.sample_period;
|
2013-01-24 15:10:29 +00:00
|
|
|
data->weight = 0;
|
2011-01-21 15:46:41 +00:00
|
|
|
|
|
|
|
if (event->header.type != PERF_RECORD_SAMPLE) {
|
2012-08-02 15:23:46 +00:00
|
|
|
if (!evsel->attr.sample_id_all)
|
2011-01-21 15:46:41 +00:00
|
|
|
return 0;
|
2012-09-26 15:48:18 +00:00
|
|
|
return perf_evsel__parse_id_sample(evsel, event, data);
|
2011-01-21 15:46:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
array = event->sample.array;
|
|
|
|
|
2013-08-27 08:23:04 +00:00
|
|
|
/*
|
|
|
|
* The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
|
|
|
|
* up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
|
|
|
|
* check the format does not go past the end of the event.
|
|
|
|
*/
|
2012-08-02 15:23:46 +00:00
|
|
|
if (evsel->sample_size + sizeof(event->header) > event->header.size)
|
2011-05-21 17:33:04 +00:00
|
|
|
return -EFAULT;
|
|
|
|
|
2013-08-27 08:23:09 +00:00
|
|
|
data->id = -1ULL;
|
|
|
|
if (type & PERF_SAMPLE_IDENTIFIER) {
|
|
|
|
data->id = *array;
|
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
2011-01-21 15:46:41 +00:00
|
|
|
if (type & PERF_SAMPLE_IP) {
|
2013-08-27 08:23:06 +00:00
|
|
|
data->ip = *array;
|
2011-01-21 15:46:41 +00:00
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_TID) {
|
2011-09-06 15:12:26 +00:00
|
|
|
u.val64 = *array;
|
|
|
|
if (swapped) {
|
|
|
|
/* undo swap of u64, then swap on individual u32s */
|
|
|
|
u.val64 = bswap_64(u.val64);
|
|
|
|
u.val32[0] = bswap_32(u.val32[0]);
|
|
|
|
u.val32[1] = bswap_32(u.val32[1]);
|
|
|
|
}
|
|
|
|
|
|
|
|
data->pid = u.val32[0];
|
|
|
|
data->tid = u.val32[1];
|
2011-01-21 15:46:41 +00:00
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_TIME) {
|
|
|
|
data->time = *array;
|
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
2011-05-30 19:08:23 +00:00
|
|
|
data->addr = 0;
|
2011-01-21 15:46:41 +00:00
|
|
|
if (type & PERF_SAMPLE_ADDR) {
|
|
|
|
data->addr = *array;
|
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_ID) {
|
|
|
|
data->id = *array;
|
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_STREAM_ID) {
|
|
|
|
data->stream_id = *array;
|
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_CPU) {
|
2011-09-06 15:12:26 +00:00
|
|
|
|
|
|
|
u.val64 = *array;
|
|
|
|
if (swapped) {
|
|
|
|
/* undo swap of u64, then swap on individual u32s */
|
|
|
|
u.val64 = bswap_64(u.val64);
|
|
|
|
u.val32[0] = bswap_32(u.val32[0]);
|
|
|
|
}
|
|
|
|
|
|
|
|
data->cpu = u.val32[0];
|
2011-01-21 15:46:41 +00:00
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_PERIOD) {
|
|
|
|
data->period = *array;
|
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_READ) {
|
2012-10-10 15:38:13 +00:00
|
|
|
u64 read_format = evsel->attr.read_format;
|
|
|
|
|
2013-08-27 08:23:04 +00:00
|
|
|
OVERFLOW_CHECK_u64(array);
|
2012-10-10 15:38:13 +00:00
|
|
|
if (read_format & PERF_FORMAT_GROUP)
|
|
|
|
data->read.group.nr = *array;
|
|
|
|
else
|
|
|
|
data->read.one.value = *array;
|
|
|
|
|
|
|
|
array++;
|
|
|
|
|
|
|
|
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
|
2013-08-27 08:23:04 +00:00
|
|
|
OVERFLOW_CHECK_u64(array);
|
2012-10-10 15:38:13 +00:00
|
|
|
data->read.time_enabled = *array;
|
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
|
2013-08-27 08:23:04 +00:00
|
|
|
OVERFLOW_CHECK_u64(array);
|
2012-10-10 15:38:13 +00:00
|
|
|
data->read.time_running = *array;
|
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
|
|
|
|
if (read_format & PERF_FORMAT_GROUP) {
|
2013-08-27 08:23:04 +00:00
|
|
|
const u64 max_group_nr = UINT64_MAX /
|
|
|
|
sizeof(struct sample_read_value);
|
|
|
|
|
|
|
|
if (data->read.group.nr > max_group_nr)
|
|
|
|
return -EFAULT;
|
|
|
|
sz = data->read.group.nr *
|
|
|
|
sizeof(struct sample_read_value);
|
|
|
|
OVERFLOW_CHECK(array, sz, max_size);
|
|
|
|
data->read.group.values =
|
|
|
|
(struct sample_read_value *)array;
|
|
|
|
array = (void *)array + sz;
|
2012-10-10 15:38:13 +00:00
|
|
|
} else {
|
2013-08-27 08:23:04 +00:00
|
|
|
OVERFLOW_CHECK_u64(array);
|
2012-10-10 15:38:13 +00:00
|
|
|
data->read.one.id = *array;
|
|
|
|
array++;
|
|
|
|
}
|
2011-01-21 15:46:41 +00:00
|
|
|
}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);

		OVERFLOW_CHECK_u64(array);
		data->callchain = (struct ip_callchain *)array++;
		if (data->callchain->nr > max_callchain_nr)
			return -EFAULT;
		sz = data->callchain->nr * sizeof(u64);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		OVERFLOW_CHECK_u64(array);
		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}
		data->raw_size = u.val32[0];
		array = (void *)array + sizeof(u32);

		OVERFLOW_CHECK(array, data->raw_size, max_size);
		data->raw_data = (void *)array;
		array = (void *)array + data->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		const u64 max_branch_nr = UINT64_MAX /
					  sizeof(struct branch_entry);

		OVERFLOW_CHECK_u64(array);
		data->branch_stack = (struct branch_stack *)array++;

		if (data->branch_stack->nr > max_branch_nr)
			return -EFAULT;
		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		OVERFLOW_CHECK_u64(array);
		data->user_regs.abi = *array;
		array++;

		if (data->user_regs.abi) {
			u64 mask = evsel->attr.sample_regs_user;

			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_regs.mask = mask;
			data->user_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		OVERFLOW_CHECK_u64(array);
		sz = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					  - (char *) event);

		if (!sz) {
			data->user_stack.size = 0;
		} else {
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_stack.data = (char *)array;
			array = (void *)array + sz;
			OVERFLOW_CHECK_u64(array);
			data->user_stack.size = *array++;
			if (WARN_ONCE(data->user_stack.size > sz,
				      "user stack dump failure\n"))
				return -EFAULT;
		}
	}
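
	/*
	 * Layout note: PERF_SAMPLE_STACK_USER is recorded as
	 * { u64 size; char data[size]; u64 dyn_size; }. The leading size is
	 * what user space asked to dump; the trailing dyn_size, stored in
	 * user_stack.size above, is how much the kernel actually dumped.
	 */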

	data->weight = 0;
	if (type & PERF_SAMPLE_WEIGHT) {
		OVERFLOW_CHECK_u64(array);
		data->weight = *array;
		array++;
	}

	data->data_src = PERF_MEM_DATA_SRC_NONE;
	if (type & PERF_SAMPLE_DATA_SRC) {
		OVERFLOW_CHECK_u64(array);
		data->data_src = *array;
		array++;
	}

	data->transaction = 0;
	if (type & PERF_SAMPLE_TRANSACTION) {
		OVERFLOW_CHECK_u64(array);
		data->transaction = *array;
		array++;
	}

	data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
	if (type & PERF_SAMPLE_REGS_INTR) {
		OVERFLOW_CHECK_u64(array);
		data->intr_regs.abi = *array;
		array++;

		if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
			u64 mask = evsel->attr.sample_regs_intr;

			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->intr_regs.mask = mask;
			data->intr_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	return 0;
}
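
/*
 * A PERF_RECORD_SAMPLE body is a flat array of u64s whose fields appear in
 * the fixed order tested above. For example, with attr.sample_type ==
 * PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME the payload is just:
 *
 *	{ u64 ip; u32 pid, tid; u64 time; }
 *
 * which is why the parser above and the sizing/synthesizing helpers below
 * must all walk the PERF_SAMPLE_* bits in exactly the same sequence.
 */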

size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
				     u64 read_format)
{
	size_t sz, result = sizeof(struct sample_event);

	if (type & PERF_SAMPLE_IDENTIFIER)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_IP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TIME)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_STREAM_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CPU)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_PERIOD)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_READ) {
		result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			result += sizeof(u64);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		result += sizeof(u32);
		result += sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		result += sizeof(u64);
		if (sz) {
			result += sz;
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_SRC)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TRANSACTION)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	return result;
}

int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  u64 read_format,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;
	size_t sz;
	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		*array = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			*array = sample->read.group.nr;
		else
			*array = sample->read.one.value;
		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			*array = sample->read.time_enabled;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			*array = sample->read.time_running;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			memcpy(array, sample->read.group.values, sz);
			array = (void *)array + sz;
		} else {
			*array = sample->read.one.id;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		memcpy(array, sample->callchain, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		u.val32[0] = sample->raw_size;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array = (void *)array + sizeof(u32);

		memcpy(array, sample->raw_data, sample->raw_size);
		array = (void *)array + sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		memcpy(array, sample->branch_stack, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			*array++ = sample->user_regs.abi;
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			memcpy(array, sample->user_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		*array++ = sz;
		if (sz) {
			memcpy(array, sample->user_stack.data, sz);
			array = (void *)array + sz;
			*array++ = sz;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		*array = sample->weight;
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		*array = sample->data_src;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		*array = sample->transaction;
		array++;
	}

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			*array++ = sample->intr_regs.abi;
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			memcpy(array, sample->intr_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	return 0;
}
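
/*
 * Sketch of the intended round trip (hypothetical caller; the buffer
 * handling is an assumption, not code from this file): size the record
 * first, then fill it, so both helpers walk the same sample_type bits:
 *
 *	size_t sz = perf_event__sample_event_size(sample, type, read_format);
 *	union perf_event *ev = zalloc(sz);
 *
 *	ev->header.type = PERF_RECORD_SAMPLE;
 *	ev->header.size = sz;
 *	perf_event__synthesize_sample(ev, type, read_format, sample, false);
 */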

struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
	return pevent_find_field(evsel->tp_format, name);
}

void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & FIELD_IS_DYNAMIC) {
		/*
		 * A dynamic array's entry packs (length << 16 | data offset);
		 * keep just the offset of the actual data.
		 */
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}
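
/*
 * Usage sketch (hypothetical caller): get a pointer into the raw data of a
 * sched:sched_switch tracepoint sample:
 *
 *	const char *comm = perf_evsel__rawptr(evsel, sample, "prev_comm");
 */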

u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	void *ptr;
	u64 value;

	if (!field)
		return 0;

	ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		/*
		 * ptr may not be 8-byte aligned within raw_data, and an
		 * extended load through an unaligned u64 pointer raises
		 * SIGBUS on strict-alignment targets such as sparc64, so
		 * copy into a temporary with memcpy instead.
		 */
		memcpy(&value, ptr, sizeof(u64));
		break;
	default:
		return 0;
	}

	if (!evsel->needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}

	return 0;
}
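
/*
 * Usage sketch (hypothetical caller): read an integer field of the same
 * tracepoint, with field size and byte order handled by the switches above:
 *
 *	u64 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
 */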

static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
{
	va_list args;
	int ret = 0;

	if (!*first) {
		ret += fprintf(fp, ",");
	} else {
		ret += fprintf(fp, ":");
		*first = false;
	}

	va_start(args, fmt);
	ret += vfprintf(fp, fmt, args);
	va_end(args);
	return ret;
}

/*
 * perf_event_attr__fprintf() is the one shared perf_event_attr printer (it
 * replaced several partial implementations scattered around the tools); it
 * calls back into this helper to format each field it emits.
 */
static int __print_attr__fprintf(FILE *fp, const char *name, const char *val, void *priv)
{
	return comma_fprintf(fp, (bool *)priv, " %s: %s", name, val);
}

int perf_evsel__fprintf(struct perf_evsel *evsel,
			struct perf_attr_details *details, FILE *fp)
{
	bool first = true;
	int printed = 0;

	if (details->event_group) {
		struct perf_evsel *pos;

		if (!perf_evsel__is_group_leader(evsel))
			return 0;

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "%s{", evsel->group_name ?: "");

		printed += fprintf(fp, "%s", perf_evsel__name(evsel));
		for_each_group_member(pos, evsel)
			printed += fprintf(fp, ",%s", perf_evsel__name(pos));

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "}");
		goto out;
	}

	printed += fprintf(fp, "%s", perf_evsel__name(evsel));

	if (details->verbose) {
		printed += perf_event_attr__fprintf(fp, &evsel->attr,
						    __print_attr__fprintf, &first);
	} else if (details->freq) {
		printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
					 (u64)evsel->attr.sample_freq);
	}
out:
	fputc('\n', fp);
	return ++printed;
}

bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize)
{
	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
	    evsel->attr.type == PERF_TYPE_HARDWARE &&
	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to hrtimer based
		 * cpu-clock-tick sw counter, which is always available even if
		 * no PMU support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
		 * b0a873e).
		 */
		scnprintf(msg, msgsize, "%s",
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");

		evsel->attr.type   = PERF_TYPE_SOFTWARE;
		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

		zfree(&evsel->name);
		return true;
	}

	return false;
}
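
/*
 * Usage sketch (hypothetical caller, modeled on the record/top tools): when
 * sys_perf_event_open() fails, give the evsel a chance to morph and retry:
 *
 *	if (perf_evsel__fallback(evsel, errno, msg, sizeof(msg))) {
 *		ui__warning("%s\n", msg);
 *		goto try_again;
 *	}
 */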

int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
			      int err, char *msg, size_t size)
{
	char sbuf[STRERR_BUFSIZE];

	switch (err) {
	case EPERM:
	case EACCES:
		return scnprintf(msg, size,
		 "You may not have permission to collect %sstats.\n"
		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
		 " -1 - Not paranoid at all\n"
		 " 0 - Disallow raw tracepoint access for unpriv\n"
		 " 1 - Disallow cpu events for unpriv\n"
		 " 2 - Disallow kernel profiling for unpriv",
				 target->system_wide ? "system-wide " : "");
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.",
				 perf_evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Probably the maximum number of open file descriptors has been reached.\n"
			 "Hint: Try again after reducing the number of events.\n"
			 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
	 "No such device - did you specify an out-of-range profile CPU?\n");
		break;
	case EOPNOTSUPP:
		if (evsel->attr.precise_ip)
			return scnprintf(msg, size, "%s",
	"\'precise\' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
	"No hardware sampling interrupt available.\n"
	"No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
#endif
		break;
	case EBUSY:
		if (find_process("oprofiled"))
			return scnprintf(msg, size,
	"The PMU counters are busy/taken by another profiler.\n"
	"We found oprofile daemon running, please stop it and try again.");
		break;
	case EINVAL:
		if (perf_missing_features.clockid)
			return scnprintf(msg, size, "clockid feature not supported.");
		if (perf_missing_features.clockid_wrong)
			return scnprintf(msg, size, "wrong clockid (%d).", clockid);
		break;
	default:
		break;
	}

	return scnprintf(msg, size,
	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
	"/bin/dmesg may provide additional information.\n"
	"No CONFIG_PERF_EVENTS=y kernel support configured?\n",
			 err, strerror_r(err, sbuf, sizeof(sbuf)),
			 perf_evsel__name(evsel));
}
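
/*
 * Usage sketch (hypothetical caller; the opts/target names are assumptions):
 * when no fallback applies, turn the errno from sys_perf_event_open() into a
 * user-facing explanation:
 *
 *	perf_evsel__open_strerror(evsel, &opts->target, errno,
 *				  msg, sizeof(msg));
 *	ui__error("%s\n", msg);
 */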