/* linux/tools/perf/util/hist.c */


#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include <math.h>
static bool hists__filter_entry_by_dso(struct hists *hists,
struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
struct hist_entry *he);
enum hist_filter {
HIST_FILTER__DSO,
HIST_FILTER__THREAD,
HIST_FILTER__PARENT,
HIST_FILTER__SYMBOL,
};
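/*
 * Default callchain rendering parameters: relative graph output,
 * pruning branches below 0.5% and ordering siblings by callee.
 */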
struct callchain_param callchain_param = {
.mode = CHAIN_GRAPH_REL,
.min_percent = 0.5,
.order = ORDER_CALLEE
};
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
return hists->col_len[col];
}
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
hists->col_len[col] = len;
}
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
if (len > hists__col_len(hists, col)) {
hists__set_col_len(hists, col, len);
return true;
}
return false;
}
void hists__reset_col_len(struct hists *hists)
{
enum hist_column col;
for (col = 0; col < HISTC_NR_COLS; ++col)
hists__set_col_len(hists, col, 0);
}
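/*
 * BITS_PER_LONG / 4 is the number of hex digits needed to print a raw
 * address, i.e. the widest string an unresolved entry can produce.
 */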
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
if (hists__col_len(hists, dso) < unresolved_col_width &&
!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
!symbol_conf.dso_list)
hists__set_col_len(hists, dso, unresolved_col_width);
}
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
u16 len;
if (h->ms.sym)
hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
else
hists__set_unres_dso_col_len(hists, HISTC_DSO);
len = thread__comm_len(h->thread);
if (hists__new_col_len(hists, HISTC_COMM, len))
hists__set_col_len(hists, HISTC_THREAD, len + 6);
if (h->ms.map) {
len = dso__name_len(h->ms.map->dso);
hists__new_col_len(hists, HISTC_DSO, len);
}
if (h->branch_info) {
int symlen;
/*
* +4 accounts for '[x] ' priv level info
* +2 accounts for the 0x prefix on raw addresses
*/
if (h->branch_info->from.sym) {
symlen = (int)h->branch_info->from.sym->namelen + 4;
hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
symlen = dso__name_len(h->branch_info->from.map->dso);
hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
} else {
symlen = unresolved_col_width + 4 + 2;
hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
}
if (h->branch_info->to.sym) {
symlen = (int)h->branch_info->to.sym->namelen + 4;
hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
symlen = dso__name_len(h->branch_info->to.map->dso);
hists__new_col_len(hists, HISTC_DSO_TO, symlen);
} else {
symlen = unresolved_col_width + 4 + 2;
hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
}
}
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
struct rb_node *next = rb_first(&hists->entries);
struct hist_entry *n;
int row = 0;
hists__reset_col_len(hists);
while (next && row++ < max_rows) {
n = rb_entry(next, struct hist_entry, rb_node);
if (!n->filtered)
hists__calc_col_len(hists, n);
next = rb_next(&n->rb_node);
}
}
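/*
 * Bucket the period into the sys/user/guest breakdown according to the
 * cpumode bits that came with the sample.
 */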
static void hist_entry__add_cpumode_period(struct hist_entry *he,
unsigned int cpumode, u64 period)
{
switch (cpumode) {
case PERF_RECORD_MISC_KERNEL:
he->stat.period_sys += period;
break;
case PERF_RECORD_MISC_USER:
he->stat.period_us += period;
break;
case PERF_RECORD_MISC_GUEST_KERNEL:
he->stat.period_guest_sys += period;
break;
case PERF_RECORD_MISC_GUEST_USER:
he->stat.period_guest_us += period;
break;
default:
break;
}
}
static void he_stat__add_period(struct he_stat *he_stat, u64 period)
{
he_stat->period += period;
he_stat->nr_events += 1;
}
static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
dest->period += src->period;
dest->period_sys += src->period_sys;
dest->period_us += src->period_us;
dest->period_guest_sys += src->period_guest_sys;
dest->period_guest_us += src->period_guest_us;
dest->nr_events += src->nr_events;
}
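/*
 * Exponential decay, as used by the live 'perf top' mode: each pass
 * scales the accumulated period and event count by 7/8, so entries
 * that stop getting samples fade out and can eventually be removed.
 */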
static void hist_entry__decay(struct hist_entry *he)
{
he->stat.period = (he->stat.period * 7) / 8;
he->stat.nr_events = (he->stat.nr_events * 7) / 8;
}
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
u64 prev_period = he->stat.period;
if (prev_period == 0)
return true;
hist_entry__decay(he);
if (!he->filtered)
hists->stats.total_period -= prev_period - he->stat.period;
return he->stat.period == 0;
}
static void __hists__decay_entries(struct hists *hists, bool zap_user,
bool zap_kernel, bool threaded)
{
struct rb_node *next = rb_first(&hists->entries);
struct hist_entry *n;
while (next) {
n = rb_entry(next, struct hist_entry, rb_node);
next = rb_next(&n->rb_node);
/*
* We may be annotating this, for instance, so keep it here in
* case it gets new samples; we'll eventually free it when the
* user stops browsing and it again gets fully decayed.
*/
if (((zap_user && n->level == '.') ||
(zap_kernel && n->level != '.') ||
hists__decay_entry(hists, n)) &&
!n->used) {
rb_erase(&n->rb_node, &hists->entries);
if (sort__need_collapse || threaded)
rb_erase(&n->rb_node_in, &hists->entries_collapsed);
hist_entry__free(n);
--hists->nr_entries;
}
}
}
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
return __hists__decay_entries(hists, zap_user, zap_kernel, false);
}
void hists__decay_entries_threaded(struct hists *hists,
bool zap_user, bool zap_kernel)
{
return __hists__decay_entries(hists, zap_user, zap_kernel, true);
}
/*
* histogram, sorted on item, collects periods
*/
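/*
 * The callchain root is expected to be laid out at the tail of
 * struct hist_entry, so a single malloc() below covers both the
 * entry and its callchain storage when callchains are in use.
 */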
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
struct hist_entry *he = malloc(sizeof(*he) + callchain_size);
if (he != NULL) {
*he = *template;
if (he->ms.map)
he->ms.map->referenced = true;
if (symbol_conf.use_callchain)
callchain_init(he->callchain);
}
return he;
}
static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
if (!h->filtered) {
hists__calc_col_len(hists, h);
++hists->nr_entries;
hists->stats.total_period += h->stat.period;
}
}
static u8 symbol__parent_filter(const struct symbol *parent)
{
if (symbol_conf.exclude_other && parent == NULL)
return 1 << HIST_FILTER__PARENT;
return 0;
}
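/*
 * Find or create the entry matching 'entry' in the current input tree:
 * walk the rbtree with hist_entry__cmp(), merge the period into an
 * existing node on a match, otherwise insert a freshly allocated copy.
 * hists->lock serializes this against the rotation done by the
 * threaded 'perf top'.
 */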
static struct hist_entry *add_hist_entry(struct hists *hists,
struct hist_entry *entry,
struct addr_location *al,
u64 period)
{
struct rb_node **p;
struct rb_node *parent = NULL;
struct hist_entry *he;
int cmp;
pthread_mutex_lock(&hists->lock);
p = &hists->entries_in->rb_node;
while (*p != NULL) {
parent = *p;
he = rb_entry(parent, struct hist_entry, rb_node_in);
cmp = hist_entry__cmp(entry, he);
if (!cmp) {
he_stat__add_period(&he->stat, period);
/* If the map of an existing hist_entry has
* become out-of-date due to an exec() or
* similar, update it. Otherwise we will
* mis-adjust symbol addresses when computing
* the history counter to increment.
*/
if (he->ms.map != entry->ms.map) {
he->ms.map = entry->ms.map;
if (he->ms.map)
he->ms.map->referenced = true;
}
goto out;
}
if (cmp < 0)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
he = hist_entry__new(entry);
if (!he)
goto out_unlock;
rb_link_node(&he->rb_node_in, parent, p);
rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
pthread_mutex_unlock(&hists->lock);
return he;
}
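/*
 * Branch samples are accounted at the branch target: map, symbol and
 * ip all come from bi->to, with bi kept around for the from/to columns.
 */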
struct hist_entry *__hists__add_branch_entry(struct hists *self,
struct addr_location *al,
struct symbol *sym_parent,
struct branch_info *bi,
u64 period)
{
struct hist_entry entry = {
.thread = al->thread,
.ms = {
.map = bi->to.map,
.sym = bi->to.sym,
},
.cpu = al->cpu,
.ip = bi->to.addr,
.level = al->level,
.stat = {
.period = period,
.nr_events = 1,
},
.parent = sym_parent,
.filtered = symbol__parent_filter(sym_parent),
.branch_info = bi,
.hists = self,
};
return add_hist_entry(self, &entry, al, period);
}
struct hist_entry *__hists__add_entry(struct hists *self,
struct addr_location *al,
struct symbol *sym_parent, u64 period)
{
struct hist_entry entry = {
.thread = al->thread,
.ms = {
.map = al->map,
.sym = al->sym,
},
.cpu = al->cpu,
.ip = al->addr,
.level = al->level,
.stat = {
.period = period,
.nr_events = 1,
},
.parent = sym_parent,
.filtered = symbol__parent_filter(sym_parent),
.hists = self,
};
return add_hist_entry(self, &entry, al, period);
}
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
struct sort_entry *se;
int64_t cmp = 0;
list_for_each_entry(se, &hist_entry__sort_list, list) {
cmp = se->se_cmp(left, right);
if (cmp)
break;
}
return cmp;
}
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
struct sort_entry *se;
int64_t cmp = 0;
list_for_each_entry(se, &hist_entry__sort_list, list) {
int64_t (*f)(struct hist_entry *, struct hist_entry *);
f = se->se_collapse ?: se->se_cmp;
cmp = f(left, right);
if (cmp)
break;
}
return cmp;
}
void hist_entry__free(struct hist_entry *he)
{
free(he);
}
/*
* collapse the histogram
*/
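/*
 * Insert 'he' into the collapsed tree, merging it into an existing
 * entry when hist_entry__collapse() considers them equal: stats are
 * summed, callchains merged and the duplicate freed.  Returns true
 * only when 'he' was actually linked into 'root'.
 */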
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
struct rb_root *root,
struct hist_entry *he)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
int64_t cmp;
while (*p != NULL) {
parent = *p;
iter = rb_entry(parent, struct hist_entry, rb_node_in);
cmp = hist_entry__collapse(iter, he);
if (!cmp) {
he_stat__add_stat(&iter->stat, &he->stat);
if (symbol_conf.use_callchain) {
callchain_cursor_reset(&callchain_cursor);
callchain_merge(&callchain_cursor,
iter->callchain,
he->callchain);
}
hist_entry__free(he);
return false;
}
if (cmp < 0)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
rb_link_node(&he->rb_node_in, parent, p);
rb_insert_color(&he->rb_node_in, root);
return true;
}
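/*
 * The input trees form a two-slot ring: return the tree that was being
 * filled and flip new additions to the other slot, so the collapse pass
 * can drain one tree while samples keep landing in the other.
 */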
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
struct rb_root *root;
pthread_mutex_lock(&hists->lock);
root = hists->entries_in;
if (++hists->entries_in > &hists->entries_in_array[1])
hists->entries_in = &hists->entries_in_array[0];
pthread_mutex_unlock(&hists->lock);
return root;
}
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
hists__filter_entry_by_dso(hists, he);
hists__filter_entry_by_thread(hists, he);
hists__filter_entry_by_symbol(hists, he);
}
static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
struct rb_root *root;
struct rb_node *next;
struct hist_entry *n;
if (!sort__need_collapse && !threaded)
return;
root = hists__get_rotate_entries_in(hists);
next = rb_first(root);
while (next) {
n = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&n->rb_node_in);
rb_erase(&n->rb_node_in, root);
if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
/*
* If it wasn't combined with one of the entries already
* collapsed, we need to apply the filters that may have
* been set by, say, the hist_browser.
*/
hists__apply_filters(hists, n);
}
}
}
void hists__collapse_resort(struct hists *hists)
{
return __hists__collapse_resort(hists, false);
}
void hists__collapse_resort_threaded(struct hists *hists)
{
return __hists__collapse_resort(hists, true);
}
/*
* reverse the map, sort on period.
*/
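/*
 * Entries with a bigger period go to the left, so an in-order walk of
 * the output tree (rb_first/rb_next) visits them in decreasing period
 * order, which is what the report and top browsers display.
 */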
static void __hists__insert_output_entry(struct rb_root *entries,
struct hist_entry *he,
u64 min_callchain_hits)
{
struct rb_node **p = &entries->rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
if (symbol_conf.use_callchain)
callchain_param.sort(&he->sorted_chain, he->callchain,
min_callchain_hits, &callchain_param);
while (*p != NULL) {
parent = *p;
iter = rb_entry(parent, struct hist_entry, rb_node);
if (he->stat.period > iter->stat.period)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
rb_link_node(&he->rb_node, parent, p);
rb_insert_color(&he->rb_node, entries);
}
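/*
 * min_callchain_hits translates callchain_param.min_percent into an
 * absolute period, below which callchain branches are folded away when
 * each entry's chain is sorted for output.
 */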
static void __hists__output_resort(struct hists *hists, bool threaded)
{
struct rb_root *root;
struct rb_node *next;
struct hist_entry *n;
u64 min_callchain_hits;
min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);
if (sort__need_collapse || threaded)
root = &hists->entries_collapsed;
else
root = hists->entries_in;
next = rb_first(root);
hists->entries = RB_ROOT;
hists->nr_entries = 0;
hists->stats.total_period = 0;
hists__reset_col_len(hists);
while (next) {
n = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&n->rb_node_in);
__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
hists__inc_nr_entries(hists, n);
}
}
void hists__output_resort(struct hists *hists)
{
return __hists__output_resort(hists, false);
}
void hists__output_resort_threaded(struct hists *hists)
{
return __hists__output_resort(hists, true);
}
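/*
 * Clear one filter bit; the entry only becomes visible again, and only
 * then is added back to the counts and column widths, once no other
 * filter still has its bit set in h->filtered.
 */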
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
enum hist_filter filter)
{
h->filtered &= ~(1 << filter);
if (h->filtered)
return;
++hists->nr_entries;
if (h->ms.unfolded)
hists->nr_entries += h->nr_rows;
h->row_offset = 0;
hists->stats.total_period += h->stat.period;
hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;
hists__calc_col_len(hists, h);
}
static bool hists__filter_entry_by_dso(struct hists *hists,
struct hist_entry *he)
{
if (hists->dso_filter != NULL &&
(he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
he->filtered |= (1 << HIST_FILTER__DSO);
return true;
}
return false;
}
void hists__filter_by_dso(struct hists *hists)
{
struct rb_node *nd;
hists->nr_entries = hists->stats.total_period = 0;
hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
hists__reset_col_len(hists);
for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
if (symbol_conf.exclude_other && !h->parent)
continue;
if (hists__filter_entry_by_dso(hists, h))
continue;
hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
}
}
static bool hists__filter_entry_by_thread(struct hists *hists,
struct hist_entry *he)
{
if (hists->thread_filter != NULL &&
he->thread != hists->thread_filter) {
he->filtered |= (1 << HIST_FILTER__THREAD);
return true;
}
return false;
}
void hists__filter_by_thread(struct hists *hists)
{
struct rb_node *nd;
hists->nr_entries = hists->stats.total_period = 0;
hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
hists__reset_col_len(hists);
for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
if (hists__filter_entry_by_thread(hists, h))
continue;
hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
}
}
static bool hists__filter_entry_by_symbol(struct hists *hists,
struct hist_entry *he)
{
if (hists->symbol_filter_str != NULL &&
(!he->ms.sym || strstr(he->ms.sym->name,
hists->symbol_filter_str) == NULL)) {
he->filtered |= (1 << HIST_FILTER__SYMBOL);
return true;
}
return false;
}
void hists__filter_by_symbol(struct hists *hists)
{
struct rb_node *nd;
hists->nr_entries = hists->stats.total_period = 0;
hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
hists__reset_col_len(hists);
for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
if (hists__filter_entry_by_symbol(hists, h))
continue;
hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
}
}
int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}
int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}
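/*
 * nr_events[0] doubles as the total across all record types, which
 * works because the PERF_RECORD_* ids start at 1.
 */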
void hists__inc_nr_events(struct hists *hists, u32 type)
{
++hists->stats.nr_events[0];
++hists->stats.nr_events[type];
}