2009-09-24 16:02:18 +00:00
|
|
|
#ifndef __PERF_SYMBOL
|
|
|
|
#define __PERF_SYMBOL 1
|
2009-05-28 17:55:04 +00:00
|
|
|
|
|
|
|
#include <linux/types.h>
|
2009-10-20 16:25:40 +00:00
|
|
|
#include <stdbool.h>
|
2010-03-25 22:59:00 +00:00
|
|
|
#include <stdint.h>
|
|
|
|
#include "map.h"
|
2012-02-09 22:21:01 +00:00
|
|
|
#include "../perf.h"
|
2009-07-01 17:46:08 +00:00
|
|
|
#include <linux/list.h>
|
2009-07-01 15:28:37 +00:00
|
|
|
#include <linux/rbtree.h>
|
2010-03-25 22:59:00 +00:00
|
|
|
#include <stdio.h>
|
2012-05-30 12:23:42 +00:00
|
|
|
#include <byteswap.h>
|
2012-09-08 00:43:17 +00:00
|
|
|
#include <libgen.h>
|
2012-10-27 21:18:29 +00:00
|
|
|
#include "build-id.h"
|
2014-05-05 10:41:45 +00:00
|
|
|
#include "event.h"
|
2014-07-29 13:21:58 +00:00
|
|
|
#include "util.h"
|
2009-05-28 17:55:04 +00:00
|
|
|
|
2013-09-30 10:07:11 +00:00
|
|
|
#ifdef HAVE_LIBELF_SUPPORT
|
2012-08-10 22:22:57 +00:00
|
|
|
#include <libelf.h>
|
|
|
|
#include <gelf.h>
|
|
|
|
#endif
|
2012-12-28 07:16:49 +00:00
|
|
|
#include <elf.h>
|
2012-08-10 22:22:57 +00:00
|
|
|
|
2012-10-27 21:18:32 +00:00
|
|
|
#include "dso.h"
|
|
|
|
|
2009-10-24 16:10:36 +00:00
|
|
|
/*
|
|
|
|
* libelf 0.8.x and earlier do not support ELF_C_READ_MMAP;
|
|
|
|
* for newer versions we can use mmap to reduce memory usage:
|
|
|
|
*/
|
2013-09-30 10:07:11 +00:00
|
|
|
#ifdef HAVE_LIBELF_MMAP_SUPPORT
|
2009-10-24 16:10:36 +00:00
|
|
|
# define PERF_ELF_C_READ_MMAP ELF_C_READ_MMAP
|
2012-09-28 09:31:59 +00:00
|
|
|
#else
|
|
|
|
# define PERF_ELF_C_READ_MMAP ELF_C_READ
|
2009-10-24 16:10:36 +00:00
|
|
|
#endif
|
|
|
|
|
2014-01-16 09:39:49 +00:00
|
|
|
#ifdef HAVE_LIBELF_SUPPORT
|
|
|
|
extern Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
|
|
|
|
GElf_Shdr *shp, const char *name, size_t *idx);
|
|
|
|
#endif
|
|
|
|
|
2009-08-11 19:22:11 +00:00
|
|
|
#ifndef DMGL_PARAMS
|
2014-07-31 05:47:42 +00:00
|
|
|
#define DMGL_NO_OPTS 0 /* For readability... */
|
2009-08-11 19:22:11 +00:00
|
|
|
#define DMGL_PARAMS (1 << 0) /* Include function args */
|
|
|
|
#define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */
|
|
|
|
#endif
|
|
|
|
|
2011-03-11 16:36:01 +00:00
|
|
|
/** struct symbol - symtab entry
|
|
|
|
*
|
|
|
|
* @ignore - resolvable but tools ignore it (e.g. idle routines)
|
|
|
|
*/
|
2009-05-28 17:55:04 +00:00
|
|
|
struct symbol {
|
|
|
|
struct rb_node rb_node;
|
perf_counter tools: Define and use our own u64, s64 etc. definitions
On 64-bit powerpc, __u64 is defined to be unsigned long rather than
unsigned long long. This causes compiler warnings every time we
print a __u64 value with %Lx.
Rather than changing __u64, we define our own u64 to be unsigned long
long on all architectures, and similarly s64 as signed long long.
For consistency we also define u32, s32, u16, s16, u8 and s8. These
definitions are put in a new header, types.h, because these definitions
are needed in util/string.h and util/symbol.h.
The main change here is the mechanical change of __[us]{64,32,16,8}
to remove the "__". The other changes are:
* Create types.h
* Include types.h in perf.h, util/string.h and util/symbol.h
* Add types.h to the LIB_H definition in Makefile
* Added (u64) casts in process_overflow_event() and print_sym_table()
to kill two remaining warnings.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: benh@kernel.crashing.org
LKML-Reference: <19003.33494.495844.956580@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-06-19 12:21:42 +00:00
|
|
|
u64 start;
|
|
|
|
u64 end;
|
2010-05-10 16:57:51 +00:00
|
|
|
u16 namelen;
|
2010-08-05 15:59:47 +00:00
|
|
|
u8 binding;
|
2011-03-11 16:36:01 +00:00
|
|
|
bool ignore;
|
2009-05-28 17:55:04 +00:00
|
|
|
char name[0];
|
|
|
|
};
|
|
|
|
|
2011-03-31 13:56:28 +00:00
|
|
|
void symbol__delete(struct symbol *sym);
|
2012-10-27 21:18:32 +00:00
|
|
|
void symbols__delete(struct rb_root *symbols);
|
2010-02-25 15:57:40 +00:00
|
|
|
|
perf probe: Allow to add events on the local functions
Allow to add events on the local functions without debuginfo.
(With the debuginfo, we can add events even on inlined functions)
Currently, probing on local functions requires debuginfo to
locate actual address. It is also possible without debuginfo since
we have symbol maps.
Without this change;
----
# ./perf probe -a t_show
Added new event:
probe:t_show (on t_show)
You can now use it in all perf tools, such as:
perf record -e probe:t_show -aR sleep 1
# ./perf probe -x perf -a identity__map_ip
no symbols found in /kbuild/ksrc/linux-3/tools/perf/perf, maybe install a debug package?
Failed to load map.
Error: Failed to add events. (-22)
----
As the above results, perf probe just put one event
on the first found symbol for kprobe event. Moreover,
for uprobe event, perf probe failed to find local
functions.
With this change;
----
# ./perf probe -a t_show
Added new events:
probe:t_show (on t_show)
probe:t_show_1 (on t_show)
probe:t_show_2 (on t_show)
probe:t_show_3 (on t_show)
You can now use it in all perf tools, such as:
perf record -e probe:t_show_3 -aR sleep 1
# ./perf probe -x perf -a identity__map_ip
Added new events:
probe_perf:identity__map_ip (on identity__map_ip in /kbuild/ksrc/linux-3/tools/perf/perf)
probe_perf:identity__map_ip_1 (on identity__map_ip in /kbuild/ksrc/linux-3/tools/perf/perf)
probe_perf:identity__map_ip_2 (on identity__map_ip in /kbuild/ksrc/linux-3/tools/perf/perf)
probe_perf:identity__map_ip_3 (on identity__map_ip in /kbuild/ksrc/linux-3/tools/perf/perf)
You can now use it in all perf tools, such as:
perf record -e probe_perf:identity__map_ip_3 -aR sleep 1
----
Now we succeed to put events on every given local functions
for both kprobes and uprobes. :)
Note that this also introduces some symbol rbtree
iteration macros; symbols__for_each, dso__for_each_symbol,
and map__for_each_symbol. These are for walking through
the symbol list in a map.
Changes from v2:
- Fix add_exec_to_probe_trace_events() not to convert address
to tp->symbol any more.
- Fix to set kernel probes based on ref_reloc_sym.
Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: "David A. Long" <dave.long@linaro.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: yrl.pp-manager.tt@hitachi.com
Link: http://lkml.kernel.org/r/20140206053225.29635.15026.stgit@kbuild-fedora.yrl.intra.hitachi.co.jp
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2014-02-06 05:32:25 +00:00
|
|
|
/* symbols__for_each_entry - iterate over symbols (rb_root)
 *
 * @symbols: the rb_root of symbols
 * @pos: the 'struct symbol *' to use as a loop cursor
 * @nd: the 'struct rb_node *' to use as a temporary storage
 *
 * The loop ends when @nd becomes NULL; the (pos = rb_entry(...)) assignment
 * in the condition is only evaluated while @nd is non-NULL.
 */
#define symbols__for_each_entry(symbols, pos, nd)			\
	for (nd = rb_first((symbols));					\
	     nd && (pos = rb_entry(nd, struct symbol, rb_node));	\
	     nd = rb_next(nd))
|
|
|
|
|
2012-04-19 13:57:06 +00:00
|
|
|
/* Size of @sym in bytes, computed as end - start. */
static inline size_t symbol__size(const struct symbol *sym)
{
	return sym->end - sym->start;
}
|
|
|
|
|
2009-12-15 22:04:40 +00:00
|
|
|
struct strlist;
|
2015-03-24 15:52:41 +00:00
|
|
|
struct intlist;
|
2009-12-15 22:04:40 +00:00
|
|
|
|
2009-11-24 14:05:15 +00:00
|
|
|
/*
 * Global configuration for symbol resolution and display, consumed via the
 * single 'symbol_conf' instance declared below.  NOTE(review): member order
 * matters to out-of-line initializers elsewhere in perf — do not reorder.
 */
struct symbol_conf {
	/* bytes of per-symbol private data, allocated before each symbol */
	unsigned short	priv_size;
	unsigned short	nr_events;
	/* feature/behavior flags */
	bool		try_vmlinux_path,
			ignore_vmlinux,
			ignore_vmlinux_buildid,
			show_kernel_path,
			use_modules,
			/* on kallsyms, allow aliased symbols (same address) */
			allow_aliases,
			/* sort symbols by name too (extra rb_node cost) */
			sort_by_name,
			show_nr_samples,
			show_total_period,
			use_callchain,
			cumulate_callchain,
			exclude_other,
			show_cpu_utilization,
			initialized,
			/* /proc/sys/kernel/kptr_restrict was non-zero */
			kptr_restrict,
			annotate_asm_raw,
			annotate_src,
			event_group,
			demangle,
			demangle_kernel,
			filter_relative,
			show_hist_headers,
			branch_callstack;
	/* override paths for the kernel image / symbol sources */
	const char	*vmlinux_name,
			*kallsyms_name,
			*source_prefix,
			*field_sep;
	/* guest-machine (virtualization) symbol sources */
	const char	*default_guest_vmlinux_name,
			*default_guest_kallsyms,
			*default_guest_modules;
	const char	*guestmount;
	/* raw, comma-separated filter strings as given on the command line */
	const char	*dso_list_str,
			*comm_list_str,
			*pid_list_str,
			*tid_list_str,
			*sym_list_str,
			*col_width_list_str;
	/* parsed forms of the filter strings above (see setup_list) */
	struct strlist	*dso_list,
			*comm_list,
			*sym_list,
			*dso_from_list,
			*dso_to_list,
			*sym_from_list,
			*sym_to_list;
	/* parsed pid/tid filters (see setup_intlist) */
	struct intlist	*pid_list,
			*tid_list;
	/* prefix prepended to every symbol file path (see symbol__join_symfs) */
	const char	*symfs;
};
|
|
|
|
|
2009-12-15 22:04:39 +00:00
|
|
|
extern struct symbol_conf symbol_conf;
|
2014-07-29 13:21:58 +00:00
|
|
|
|
|
|
|
/*
 * Write symbol_conf.symfs joined with @path into @bf (capacity @size).
 * Returns path__join()'s result.
 */
static inline int __symbol__join_symfs(char *bf, size_t size, const char *path)
{
	return path__join(bf, size, symbol_conf.symfs, path);
}

/* Convenience wrapper: @bf must be an actual array, as sizeof is taken. */
#define symbol__join_symfs(bf, path) __symbol__join_symfs(bf, sizeof(bf), path)
|
|
|
|
|
2012-12-07 20:39:39 +00:00
|
|
|
extern int vmlinux_path__nr_entries;
|
|
|
|
extern char **vmlinux_path;
|
2009-10-30 18:28:24 +00:00
|
|
|
|
2011-03-31 13:56:28 +00:00
|
|
|
static inline void *symbol__priv(struct symbol *sym)
|
2009-10-30 18:28:24 +00:00
|
|
|
{
|
2011-03-31 13:56:28 +00:00
|
|
|
return ((void *)sym) - symbol_conf.priv_size;
|
2009-10-30 18:28:24 +00:00
|
|
|
}
|
|
|
|
|
2010-02-03 18:52:00 +00:00
|
|
|
/*
 * Reference symbol used to detect/apply kernel relocation: the same symbol's
 * address as currently loaded (@addr) and as recorded in the image
 * (@unrelocated_addr); the difference is the relocation offset.
 * NOTE(review): semantics inferred from the field names — confirm at the
 * places that fill this in.
 */
struct ref_reloc_sym {
	const char	*name;
	u64		addr;
	u64		unrelocated_addr;
};
|
|
|
|
|
2010-03-24 19:40:17 +00:00
|
|
|
/* A resolved symbol together with the map it was found in. */
struct map_symbol {
	struct map	*map;
	struct symbol	*sym;
};
|
|
|
|
|
2012-02-09 22:21:01 +00:00
|
|
|
/*
 * A map/symbol pair plus the raw address that resolved to it.
 * NOTE(review): @addr vs @al_addr — presumably the original and the
 * map-adjusted address; confirm against the branch-stack resolution code.
 */
struct addr_map_symbol {
	struct map	*map;
	struct symbol	*sym;
	u64		addr;
	u64		al_addr;
};
|
|
|
|
|
|
|
|
/* One resolved branch-stack entry: source, target and the branch flags. */
struct branch_info {
	struct addr_map_symbol	from;
	struct addr_map_symbol	to;
	struct branch_flags	flags;
};
|
|
|
|
|
2013-01-24 15:10:35 +00:00
|
|
|
/*
 * Resolved memory-access sample: instruction address, data address, and the
 * perf_mem_data_src descriptor of the access.
 */
struct mem_info {
	struct addr_map_symbol	iaddr;
	struct addr_map_symbol	daddr;
	union perf_mem_data_src	data_src;
};
|
|
|
|
|
perf tools: Consolidate symbol resolving across all tools
Now we have a very high level routine for simple tools to
process IP sample events:
int event__preprocess_sample(const event_t *self,
struct addr_location *al,
symbol_filter_t filter)
It receives the event itself and will insert new threads in the
global threads list and resolve the map and symbol, filling all
this info into the new addr_location struct, so that tools like
annotate and report can further process the event by creating
hist_entries in their specific way (with or without callgraphs,
etc).
It in turn uses the new next layer function:
void thread__find_addr_location(struct thread *self, u8 cpumode,
enum map_type type, u64 addr,
struct addr_location *al,
symbol_filter_t filter)
This one will, given a thread (userspace or the kernel kthread
one), will find the given type (MAP__FUNCTION now, MAP__VARIABLE
too in the near future) at the given cpumode, taking vdsos into
account (userspace hit, but kernel symbol) and will fill all
these details in the addr_location given.
Tools that need a more compact API for plain function
resolution, like 'kmem', can use this other one:
struct symbol *thread__find_function(struct thread *self, u64 addr,
symbol_filter_t filter)
So, to resolve a kernel symbol, that is all the 'kmem' tool
needs, its just a matter of calling:
sym = thread__find_function(kthread, addr, NULL);
The 'filter' parameter is needed because we do lazy
parsing/loading of ELF symtabs or /proc/kallsyms.
With this we remove more code duplication all around, which is
always good, huh? :-)
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: John Kacur <jkacur@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1259346563-12568-12-git-send-email-acme@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-11-27 18:29:23 +00:00
|
|
|
/*
 * Fully resolved sample location: the machine/thread it came from, the map
 * and symbol the address fell into, plus per-sample attributes.
 */
struct addr_location {
	struct machine *machine;
	struct thread *thread;
	struct map    *map;
	struct symbol *sym;
	u64	      addr;
	/* one-letter map level, e.g. printed next to the symbol ([k]/[.]) */
	char	      level;
	/* non-zero when the sample was filtered out */
	u8	      filtered;
	u8	      cpumode;
	s32	      cpu;
};
|
|
|
|
|
2012-08-10 22:22:57 +00:00
|
|
|
/*
 * A source of symbols for a dso (see symsrc__init/symsrc__destroy): the file
 * name, an open fd and its dso_binary_type; when built with libelf, also the
 * parsed ELF state for the sections symbol loading cares about.
 */
struct symsrc {
	char *name;
	int fd;
	enum dso_binary_type type;

#ifdef HAVE_LIBELF_SUPPORT
	Elf *elf;
	GElf_Ehdr ehdr;

	/* .opd section (function descriptors), if present */
	Elf_Scn *opdsec;
	size_t opdidx;
	GElf_Shdr opdshdr;

	/* .symtab section */
	Elf_Scn *symtab;
	GElf_Shdr symshdr;

	/* .dynsym section */
	Elf_Scn *dynsym;
	size_t dynsym_idx;
	GElf_Shdr dynshdr;

	/* symbol addresses need adjusting before use */
	bool adjust_symbols;
	bool is_64_bit;
#endif
};
|
|
|
|
|
|
|
|
void symsrc__destroy(struct symsrc *ss);
|
|
|
|
int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
|
|
|
|
enum dso_binary_type type);
|
2012-08-10 22:23:00 +00:00
|
|
|
bool symsrc__has_symtab(struct symsrc *ss);
|
2012-08-10 22:23:02 +00:00
|
|
|
bool symsrc__possibly_runtime(struct symsrc *ss);
|
2012-08-10 22:22:57 +00:00
|
|
|
|
2011-03-31 13:56:28 +00:00
|
|
|
int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter);
|
|
|
|
int dso__load_vmlinux(struct dso *dso, struct map *map,
|
2013-12-10 14:58:52 +00:00
|
|
|
const char *vmlinux, bool vmlinux_allocated,
|
|
|
|
symbol_filter_t filter);
|
2011-03-31 13:56:28 +00:00
|
|
|
int dso__load_vmlinux_path(struct dso *dso, struct map *map,
|
2010-02-03 18:52:00 +00:00
|
|
|
symbol_filter_t filter);
|
2011-03-31 13:56:28 +00:00
|
|
|
int dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map,
|
2010-02-03 18:52:00 +00:00
|
|
|
symbol_filter_t filter);
|
2012-10-27 21:18:32 +00:00
|
|
|
|
2011-03-31 13:56:28 +00:00
|
|
|
struct symbol *dso__find_symbol(struct dso *dso, enum map_type type,
|
|
|
|
u64 addr);
|
|
|
|
struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
|
perf symbols: Allow lookups by symbol name too
Configurable via symbol_conf.sort_by_name, so that the cost of an
extra rb_node on all 'struct symbol' instances is not paid by tools
that only want to decode addresses.
How to use it:
symbol_conf.sort_by_name = true;
symbol_init(&symbol_conf);
struct map *map = map_groups__find_by_name(kmaps, MAP__VARIABLE, "[kernel.kallsyms]");
if (map == NULL) {
pr_err("couldn't find map!\n");
kernel_maps__fprintf(stdout);
} else {
struct symbol *sym = map__find_symbol_by_name(map, sym_filter, NULL);
if (sym == NULL)
pr_err("couldn't find symbol %s!\n", sym_filter);
else
pr_info("symbol %s: %#Lx-%#Lx \n", sym_filter, sym->start, sym->end);
}
Looking over the vmlinux/kallsyms is common enough that I'll add a
variable to the upcoming struct perf_session to avoid the need to
use map_groups__find_by_name to get the main vmlinux/kallsyms map.
The above example looks on the 'variable' symtab, but it is just
like that for the functions one.
Also the sort operation is done when we first use
map__find_symbol_by_name, in a lazy way.
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1260564622-12392-1-git-send-email-acme@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-12-11 20:50:22 +00:00
|
|
|
const char *name);
|
2015-01-16 18:39:53 +00:00
|
|
|
struct symbol *symbol__next_by_name(struct symbol *sym);
|
2009-05-28 17:55:04 +00:00
|
|
|
|
2014-07-14 10:02:50 +00:00
|
|
|
struct symbol *dso__first_symbol(struct dso *dso, enum map_type type);
|
|
|
|
struct symbol *dso__next_symbol(struct symbol *sym);
|
|
|
|
|
2014-07-22 13:17:59 +00:00
|
|
|
enum dso_type dso__type_fd(int fd);
|
|
|
|
|
2009-11-03 23:46:10 +00:00
|
|
|
int filename__read_build_id(const char *filename, void *bf, size_t size);
|
2009-11-18 22:20:52 +00:00
|
|
|
int sysfs__read_build_id(const char *filename, void *bf, size_t size);
|
2013-10-08 08:45:48 +00:00
|
|
|
int modules__parse(const char *filename, void *arg,
|
|
|
|
int (*process_module)(void *arg, const char *name,
|
|
|
|
u64 start));
|
2012-08-06 04:41:20 +00:00
|
|
|
int filename__read_debuglink(const char *filename, char *debuglink,
|
|
|
|
size_t size);
|
2009-11-03 23:46:10 +00:00
|
|
|
|
2014-08-12 06:40:45 +00:00
|
|
|
struct perf_session_env;
|
|
|
|
int symbol__init(struct perf_session_env *env);
|
2010-07-30 21:31:28 +00:00
|
|
|
void symbol__exit(void);
|
2012-08-06 04:41:19 +00:00
|
|
|
void symbol__elf_init(void);
|
2012-08-06 04:41:20 +00:00
|
|
|
struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name);
|
2012-01-30 04:43:15 +00:00
|
|
|
size_t symbol__fprintf_symname_offs(const struct symbol *sym,
|
|
|
|
const struct addr_location *al, FILE *fp);
|
2012-01-30 04:42:57 +00:00
|
|
|
size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp);
|
2012-10-27 21:18:32 +00:00
|
|
|
size_t symbol__fprintf(struct symbol *sym, FILE *fp);
|
2010-01-04 18:19:27 +00:00
|
|
|
bool symbol_type__is_a(char symbol_type, enum map_type map_type);
|
2012-12-07 20:39:39 +00:00
|
|
|
bool symbol__restricted_filename(const char *filename,
|
|
|
|
const char *restricted_filename);
|
2013-11-18 20:32:45 +00:00
|
|
|
bool symbol__is_idle(struct symbol *sym);
|
2010-01-04 18:19:27 +00:00
|
|
|
|
2012-08-10 22:23:01 +00:00
|
|
|
int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
|
|
|
|
struct symsrc *runtime_ss, symbol_filter_t filter,
|
|
|
|
int kmodule);
|
2012-08-10 22:22:59 +00:00
|
|
|
int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss,
|
|
|
|
struct map *map, symbol_filter_t filter);
|
2012-08-06 04:41:20 +00:00
|
|
|
|
|
|
|
void symbols__insert(struct rb_root *symbols, struct symbol *sym);
|
|
|
|
void symbols__fixup_duplicate(struct rb_root *symbols);
|
|
|
|
void symbols__fixup_end(struct rb_root *symbols);
|
|
|
|
void __map_groups__fixup_end(struct map_groups *mg, enum map_type type);
|
|
|
|
|
2013-08-07 11:38:51 +00:00
|
|
|
typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data);
|
|
|
|
int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
|
|
|
|
bool *is_64_bit);
|
|
|
|
|
2013-10-09 12:01:12 +00:00
|
|
|
/* mkstemp() template for the extracted kcore chunk (see kcore_extract__create) */
#define PERF_KCORE_EXTRACT "/tmp/perf-kcore-XXXXXX"

/*
 * State for extracting a [addr, addr+len) chunk at file offset @offs from
 * @kcore_filename into a temporary file; managed by kcore_extract__create()
 * and kcore_extract__delete().
 */
struct kcore_extract {
	char *kcore_filename;
	u64 addr;
	u64 offs;
	u64 len;
	/* sized to hold the expanded mkstemp template above */
	char extract_filename[sizeof(PERF_KCORE_EXTRACT)];
	int fd;
};
|
|
|
|
|
|
|
|
int kcore_extract__create(struct kcore_extract *kce);
|
|
|
|
void kcore_extract__delete(struct kcore_extract *kce);
|
|
|
|
|
2013-10-14 13:57:29 +00:00
|
|
|
int kcore_copy(const char *from_dir, const char *to_dir);
|
|
|
|
int compare_proc_modules(const char *from, const char *to);
|
|
|
|
|
2013-11-18 20:32:48 +00:00
|
|
|
int setup_list(struct strlist **list, const char *list_str,
|
|
|
|
const char *list_name);
|
2015-03-24 15:52:41 +00:00
|
|
|
int setup_intlist(struct intlist **list, const char *list_str,
|
|
|
|
const char *list_name);
|
2013-11-18 20:32:48 +00:00
|
|
|
|
2015-04-28 12:05:35 +00:00
|
|
|
#ifdef HAVE_LIBELF_SUPPORT
|
|
|
|
bool elf__needs_adjust_symbols(GElf_Ehdr ehdr);
|
2015-04-28 12:05:38 +00:00
|
|
|
void arch__elf_sym_adjust(GElf_Sym *sym);
|
2015-04-28 12:05:35 +00:00
|
|
|
#endif
|
|
|
|
|
2015-04-28 12:05:36 +00:00
|
|
|
#define SYMBOL_A 0
|
|
|
|
#define SYMBOL_B 1
|
|
|
|
|
|
|
|
int arch__choose_best_symbol(struct symbol *syma, struct symbol *symb);
|
|
|
|
|
2009-09-24 16:02:18 +00:00
|
|
|
#endif /* __PERF_SYMBOL */
|