linux/tools/perf/util/evsel.h
Kan Liang 384b60557b perf tools: Construct LBR call chain
The LBR call stack contains user-space call chains only; it is delivered
in the PERF_SAMPLE_BRANCH_STACK data format. Kernel call chains are
still delivered in the PERF_SAMPLE_CALLCHAIN format.

The perf tool has to combine both data sources to construct a
complete call stack.
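
Conceptually, the merge takes the kernel entries from the FP chain, up
to and including the PERF_CONTEXT_USER marker, and then appends the
user-space entries recovered from the LBR stack. A minimal sketch of
the idea (a hypothetical helper, not the actual perf code):

  #include <stdint.h>
  #include <stddef.h>

  #define PERF_CONTEXT_USER ((uint64_t)-512)

  /* Hypothetical: stitch the kernel FP chain and the user LBR chain. */
  static size_t merge_callchain(const uint64_t *fp_ips, size_t fp_nr,
                                const uint64_t *lbr_ips, size_t lbr_nr,
                                uint64_t *out, size_t out_max)
  {
          size_t n = 0, i;

          /* Kernel part, ending at the user-context marker. */
          for (i = 0; i < fp_nr && n < out_max; i++) {
                  out[n++] = fp_ips[i];
                  if (fp_ips[i] == PERF_CONTEXT_USER)
                          break;
          }

          /* User part, taken from the LBR call stack. */
          for (i = 0; i < lbr_nr && n < out_max; i++)
                  out[n++] = lbr_ips[i];

          return n;
  }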

For the "perf report -D" option, both lbr and fp information will be
displayed.

A new call chain recording option, "lbr", is introduced into the perf
tool for the LBR call stack. The user can pass --call-graph lbr to get
call stack information from the hardware.
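
Under the hood, this mode requests user-only branch sampling in
call-stack mode. A minimal sketch of the perf_event_attr setup it
implies (illustrative, not the exact perf tool code;
PERF_SAMPLE_BRANCH_CALL_STACK is introduced by this patch series):

  #include <linux/perf_event.h>
  #include <string.h>

  static void setup_lbr_callstack(struct perf_event_attr *attr)
  {
          memset(attr, 0, sizeof(*attr));
          attr->size = sizeof(*attr);
          attr->type = PERF_TYPE_HARDWARE;
          attr->config = PERF_COUNT_HW_CPU_CYCLES;
          /* Ask for both the kernel FP chain and the LBR branch stack. */
          attr->sample_type = PERF_SAMPLE_CALLCHAIN |
                              PERF_SAMPLE_BRANCH_STACK;
          /* User-only branches, recorded in call-stack mode. */
          attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
                                     PERF_SAMPLE_BRANCH_CALL_STACK;
  }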

Here are some examples.

When profiling bc(1) on Fedora 19:

  echo 'scale=2000; 4*a(1)' > cmd; perf record --call-graph lbr bc -l < cmd

With LBR enabled, the perf report output looks like this:

    50.36%       bc  bc                 [.] bc_divide
                 |
                 --- bc_divide
                     execute
                     run_code
                     yyparse
                     main
                     __libc_start_main
                     _start
    33.66%       bc  bc                 [.] _one_mult
                 |
                 --- _one_mult
                     bc_divide
                     execute
                     run_code
                     yyparse
                     main
                     __libc_start_main
                     _start
     7.62%       bc  bc                 [.] _bc_do_add
                 |
                 --- _bc_do_add
                    |
                    |--99.89%-- 0x2000186a8
                     --0.11%-- [...]
     6.83%       bc  bc                 [.] _bc_do_sub
                 |
                 --- _bc_do_sub
                    |
                    |--99.94%-- bc_add
                    |          execute
                    |          run_code
                    |          yyparse
                    |          main
                    |          __libc_start_main
                    |          _start
                     --0.06%-- [...]
     0.46%       bc  libc-2.17.so       [.] __memset_sse2
                 |
                 --- __memset_sse2
                    |
                    |--54.13%-- bc_new_num
                    |          |
                    |          |--51.00%-- bc_divide
                    |          |          execute
                    |          |          run_code
                    |          |          yyparse
                    |          |          main
                    |          |          __libc_start_main
                    |          |          _start
                    |          |
                    |          |--30.46%-- _bc_do_sub
                    |          |          bc_add
                    |          |          execute
                    |          |          run_code
                    |          |          yyparse
                    |          |          main
                    |          |          __libc_start_main
                    |          |          _start
                    |          |
                    |           --18.55%-- _bc_do_add
                    |                     bc_add
                    |                     execute
                    |                     run_code
                    |                     yyparse
                    |                     main
                    |                     __libc_start_main
                    |                     _start
                    |
                     --45.87%-- bc_divide
                               execute
                               run_code
                               yyparse
                               main
                               __libc_start_main
                               _start

With FP, recorded with the command below, the perf report output looks
like this:

  echo 'scale=2000; 4*a(1)' > cmd; perf record --call-graph fp bc -l < cmd

    50.49%       bc  bc                 [.] bc_divide
                 |
                 --- bc_divide
    33.57%       bc  bc                 [.] _one_mult
                 |
                 --- _one_mult
     7.61%       bc  bc                 [.] _bc_do_add
                 |
                 --- _bc_do_add
                     0x2000186a8
     6.88%       bc  bc                 [.] _bc_do_sub
                 |
                 --- _bc_do_sub
     0.42%       bc  libc-2.17.so       [.] __memcpy_ssse3_back
                 |
                 --- __memcpy_ssse3_back

With LBR, the perf report -D output looks like this:

3458145275743 0x2fd750 [0xd8]: PERF_RECORD_SAMPLE(IP, 0x2): 9748/9748: 0x408ea8 period: 609644 addr: 0
... LBR call chain: nr:8
.....  0: fffffffffffffe00
.....  1: 0000000000408e50
.....  2: 000000000040a458
.....  3: 000000000040562e
.....  4: 0000000000408590
.....  5: 00000000004022c0
.....  6: 00000000004015dd
.....  7: 0000003d1cc21b43
... FP chain: nr:2
.....  0: fffffffffffffe00
.....  1: 0000000000408ea8
 ... thread: bc:9748
 ...... dso: /usr/bin/bc
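
The leading entry in both dumps, fffffffffffffe00, is not an
instruction pointer but the PERF_CONTEXT_USER marker ((__u64)-512),
which separates the kernel and user parts of a chain. The markers are
defined in include/uapi/linux/perf_event.h:

  enum perf_callchain_context {
          PERF_CONTEXT_HV           = (__u64)-32,
          PERF_CONTEXT_KERNEL       = (__u64)-128,
          PERF_CONTEXT_USER         = (__u64)-512,

          PERF_CONTEXT_GUEST        = (__u64)-2048,
          PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
          PERF_CONTEXT_GUEST_USER   = (__u64)-2560,

          PERF_CONTEXT_MAX          = (__u64)-4095,
  };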

The LBR call stack has the following known limitations:

 - Zero-length calls are not filtered out by the hardware (see the
   sketch after this list)

 - Exception handling such as setjmp/longjmp will cause calls and
   returns not to match

 - Pushing a different return address onto the stack will cause calls
   and returns not to match

 - If the call stack is deeper than the LBR stack, only the most
   recent entries are captured

Tested-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Kan Liang <kan.liang@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Borislav Petkov <bp@suse.de>
Cc: David Ahern <dsahern@gmail.com>
Cc: Don Zickus <dzickus@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Simon Que <sque@chromium.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/r/1420482185-29830-3-git-send-email-kan.liang@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2015-02-18 17:16:18 +01:00

#ifndef __PERF_EVSEL_H
#define __PERF_EVSEL_H 1

#include <linux/list.h>
#include <stdbool.h>
#include <stddef.h>
#include <linux/perf_event.h>
#include <linux/types.h>

#include "xyarray.h"
#include "symbol.h"

struct perf_counts_values {
        union {
                struct {
                        u64 val;
                        u64 ena;
                        u64 run;
                };
                u64 values[3];
        };
};

struct perf_counts {
        s8 scaled;
        struct perf_counts_values aggr;
        struct perf_counts_values cpu[];
};

struct perf_evsel;

/*
 * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there are
 * more than one entry in the evlist.
 */
struct perf_sample_id {
        struct hlist_node node;
        u64 id;
        struct perf_evsel *evsel;
        int idx;
        int cpu;
        pid_t tid;

        /* Holds total ID period value for PERF_SAMPLE_READ processing. */
        u64 period;
};

struct cgroup_sel;

/** struct perf_evsel - event selector
 *
 * @name - Can be set to retain the original event name passed by the user,
 *         so that when showing results in tools such as 'perf stat', we
 *         show the name used, not some alias.
 * @id_pos: the position of the event id (PERF_SAMPLE_ID or
 *          PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of
 *          struct sample_event
 * @is_pos: the position (counting backwards) of the event id (PERF_SAMPLE_ID or
 *          PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if sample_id_all
 *          is used there is an id sample appended to non-sample events
 * @priv: And what is in its containing unnamed union are tool specific
 */
struct perf_evsel {
        struct list_head node;
        struct perf_event_attr attr;
        char *filter;
        struct xyarray *fd;
        struct xyarray *sample_id;
        u64 *id;
        struct perf_counts *counts;
        struct perf_counts *prev_raw_counts;
        int idx;
        u32 ids;
        char *name;
        double scale;
        const char *unit;
        bool snapshot;
        struct event_format *tp_format;
        union {
                void *priv;
                off_t id_offset;
                u64 db_id;
        };
        struct cgroup_sel *cgrp;
        void *handler;
        struct cpu_map *cpus;
        unsigned int sample_size;
        int id_pos;
        int is_pos;
        bool supported;
        bool needs_swap;
        bool no_aux_samples;
        bool immediate;
        bool system_wide;
        bool tracking;
        bool per_pkg;
        unsigned long *per_pkg_mask;
        /* parse modifier helper */
        int exclude_GH;
        int nr_members;
        int sample_read;
        struct perf_evsel *leader;
        char *group_name;
};

union u64_swap {
        u64 val64;
        u32 val32[2];
};

struct cpu_map;
struct target;
struct thread_map;
struct perf_evlist;
struct record_opts;

void perf_counts_values__scale(struct perf_counts_values *count,
                               bool scale, s8 *pscaled);

void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu,
                                struct perf_counts_values *count);

int perf_evsel__object_config(size_t object_size,
                              int (*init)(struct perf_evsel *evsel),
                              void (*fini)(struct perf_evsel *evsel));

struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx);

static inline struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
{
        return perf_evsel__new_idx(attr, 0);
}

struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx);

static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name)
{
        return perf_evsel__newtp_idx(sys, name, 0);
}

struct event_format *event_format__new(const char *sys, const char *name);

void perf_evsel__init(struct perf_evsel *evsel,
                      struct perf_event_attr *attr, int idx);
void perf_evsel__exit(struct perf_evsel *evsel);
void perf_evsel__delete(struct perf_evsel *evsel);

void perf_evsel__config(struct perf_evsel *evsel,
                        struct record_opts *opts);

int __perf_evsel__sample_size(u64 sample_type);
void perf_evsel__calc_id_pos(struct perf_evsel *evsel);

bool perf_evsel__is_cache_op_valid(u8 type, u8 op);

#define PERF_EVSEL__MAX_ALIASES 8

extern const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
                                       [PERF_EVSEL__MAX_ALIASES];
extern const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
                                          [PERF_EVSEL__MAX_ALIASES];
extern const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
                                              [PERF_EVSEL__MAX_ALIASES];
extern const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX];
extern const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX];
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
                                            char *bf, size_t size);
const char *perf_evsel__name(struct perf_evsel *evsel);

const char *perf_evsel__group_name(struct perf_evsel *evsel);
int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size);

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus);
void perf_evsel__free_counts(struct perf_evsel *evsel);
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);

void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
                                  enum perf_event_sample_format bit);
void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
                                    enum perf_event_sample_format bit);

#define perf_evsel__set_sample_bit(evsel, bit) \
        __perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)

#define perf_evsel__reset_sample_bit(evsel, bit) \
        __perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)

void perf_evsel__set_sample_id(struct perf_evsel *evsel,
                               bool use_sample_identifier);

int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
                           const char *filter);
int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads);

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
                             struct cpu_map *cpus);
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
                                struct thread_map *threads);
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                     struct thread_map *threads);
void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads);

struct perf_sample;

void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
                         const char *name);
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
                       const char *name);

static inline char *perf_evsel__strval(struct perf_evsel *evsel,
                                       struct perf_sample *sample,
                                       const char *name)
{
        return perf_evsel__rawptr(evsel, sample, name);
}

struct format_field;

struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name);

#define perf_evsel__match(evsel, t, c)          \
        (evsel->attr.type == PERF_TYPE_##t &&   \
         evsel->attr.config == PERF_COUNT_##c)

static inline bool perf_evsel__match2(struct perf_evsel *e1,
                                      struct perf_evsel *e2)
{
        return (e1->attr.type == e2->attr.type) &&
               (e1->attr.config == e2->attr.config);
}

#define perf_evsel__cmp(a, b)                           \
        ((a) &&                                         \
         (b) &&                                         \
         (a)->attr.type == (b)->attr.type &&            \
         (a)->attr.config == (b)->attr.config)

typedef int (perf_evsel__read_cb_t)(struct perf_evsel *evsel,
                                    int cpu, int thread,
                                    struct perf_counts_values *count);

int perf_evsel__read_cb(struct perf_evsel *evsel, int cpu, int thread,
                        perf_evsel__read_cb_t cb);

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
                              int cpu, int thread, bool scale);

/**
 * perf_evsel__read_on_cpu - Read out the results on a CPU and thread
 *
 * @evsel - event selector to read value
 * @cpu - CPU of interest
 * @thread - thread of interest
 */
static inline int perf_evsel__read_on_cpu(struct perf_evsel *evsel,
                                          int cpu, int thread)
{
        return __perf_evsel__read_on_cpu(evsel, cpu, thread, false);
}

/**
 * perf_evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
 *
 * @evsel - event selector to read value
 * @cpu - CPU of interest
 * @thread - thread of interest
 */
static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel,
                                                 int cpu, int thread)
{
        return __perf_evsel__read_on_cpu(evsel, cpu, thread, true);
}

int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
                             struct perf_sample *sample);

static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel)
{
        return list_entry(evsel->node.next, struct perf_evsel, node);
}

static inline struct perf_evsel *perf_evsel__prev(struct perf_evsel *evsel)
{
        return list_entry(evsel->node.prev, struct perf_evsel, node);
}

/**
 * perf_evsel__is_group_leader - Return whether given evsel is a leader event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true if @evsel is a group leader or a stand-alone event
 */
static inline bool perf_evsel__is_group_leader(const struct perf_evsel *evsel)
{
        return evsel->leader == evsel;
}

/**
 * perf_evsel__is_group_event - Return whether given evsel is a group event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true iff event group view is enabled and @evsel is an actual group
 * leader which has other members in the group
 */
static inline bool perf_evsel__is_group_event(struct perf_evsel *evsel)
{
        if (!symbol_conf.event_group)
                return false;

        return perf_evsel__is_group_leader(evsel) && evsel->nr_members > 1;
}

/**
 * perf_evsel__is_function_event - Return whether given evsel is a function
 * trace event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true if event is function trace event
 */
static inline bool perf_evsel__is_function_event(struct perf_evsel *evsel)
{
#define FUNCTION_EVENT "ftrace:function"

        return evsel->name &&
               !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));

#undef FUNCTION_EVENT
}

struct perf_attr_details {
        bool freq;
        bool verbose;
        bool event_group;
};

int perf_evsel__fprintf(struct perf_evsel *evsel,
                        struct perf_attr_details *details, FILE *fp);

bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
                          char *msg, size_t msgsize);
int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
                              int err, char *msg, size_t size);

static inline int perf_evsel__group_idx(struct perf_evsel *evsel)
{
        return evsel->idx - evsel->leader->idx;
}

#define for_each_group_member(_evsel, _leader)                                          \
for ((_evsel) = list_entry((_leader)->node.next, struct perf_evsel, node);              \
     (_evsel) && (_evsel)->leader == (_leader);                                         \
     (_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node))

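/*
 * True when this evsel samples LBR call stacks, i.e. the
 * PERF_SAMPLE_BRANCH_CALL_STACK bit is set in branch_sample_type
 * (the new --call-graph lbr mode).
 */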
static inline bool has_branch_callstack(struct perf_evsel *evsel)
{
        return evsel->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
}

#endif /* __PERF_EVSEL_H */