Merge branch 'perf/core' into perf/probes

Conflicts:
	tools/perf/Makefile

Merge reason:
 - fix the conflict
 - pick up the pr_*() infrastructure to queue up dependent patch

Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 4331595650
@@ -779,6 +779,13 @@ and is between 256 and 4096 characters. It is defined in the file
			by the set_ftrace_notrace file in the debugfs
			tracing directory.

	ftrace_graph_filter=[function-list]
			[FTRACE] Limit the top level callers functions traced
			by the function graph tracer at boot up.
			function-list is a comma separated list of functions
			that can be changed at run time by the
			set_graph_function file in the debugfs tracing directory.
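			For example, booting with
			"ftrace_graph_filter=sys_read,do_IRQ" limits the graph
			tracer to those call chains (the function names here
			are only illustrative).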

	gamecon.map[2|3]=
			[HW,JOY] Multisystem joystick and NES/SNES/PSX pad
			support via parallel port (up to 5 devices per port)

@@ -213,10 +213,19 @@ If you can't trace NMI functions, then skip this option.
		<details to be filled>


HAVE_FTRACE_SYSCALLS
HAVE_SYSCALL_TRACEPOINTS
---------------------

		<details to be filled>
You need very few things to get the syscalls tracing in an arch.

  - Have a NR_syscalls variable in <asm/unistd.h> that provides the number
    of syscalls supported by the arch.
  - Implement arch_syscall_addr() that resolves a syscall address from a
    syscall number.
  - Support the TIF_SYSCALL_TRACEPOINT thread flags
  - Put the trace_sys_enter() and trace_sys_exit() tracepoints calls from ptrace
    in the ptrace syscalls tracing path.
  - Tag this arch as HAVE_SYSCALL_TRACEPOINTS.
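A minimal sketch of the arch_syscall_addr() hook described above, assuming the
architecture exposes a flat sys_call_table of handler addresses (the s390 and
x86 versions later in this diff follow the same pattern):

	extern const unsigned long sys_call_table[];	/* provided by the arch */

	/* Resolve a syscall number to the address of its handler. */
	unsigned long __init arch_syscall_addr(int nr)
	{
		return sys_call_table[nr];
	}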


HAVE_FTRACE_MCOUNT_RECORD
@@ -203,73 +203,10 @@ out:

#ifdef CONFIG_FTRACE_SYSCALLS

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];
extern unsigned int sys_call_table[];

static struct syscall_metadata **syscalls_metadata;

struct syscall_metadata *syscall_nr_to_meta(int nr)
unsigned long __init arch_syscall_addr(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
	return (unsigned long)sys_call_table[nr];
}

int syscall_name_to_nr(char *name)
{
	int i;

	if (!syscalls_metadata)
		return -1;
	for (i = 0; i < NR_syscalls; i++)
		if (syscalls_metadata[i])
			if (!strcmp(syscalls_metadata[i]->name, name))
				return i;
	return -1;
}

void set_syscall_enter_id(int num, int id)
{
	syscalls_metadata[num]->enter_id = id;
}

void set_syscall_exit_id(int num, int id)
{
	syscalls_metadata[num]->exit_id = id;
}

static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata *start;
	struct syscall_metadata *stop;
	char str[KSYM_SYMBOL_LEN];

	start = (struct syscall_metadata *)__start_syscalls_metadata;
	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	for ( ; start < stop; start++) {
		if (start->name && !strcmp(start->name + 3, str + 3))
			return start;
	}
	return NULL;
}

static int __init arch_init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	int i;
	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls,
				    GFP_KERNEL);
	if (!syscalls_metadata)
		return -ENOMEM;
	for (i = 0; i < NR_syscalls; i++) {
		meta = find_syscall_meta((unsigned long)sys_call_table[i]);
		syscalls_metadata[i] = meta;
	}
	return 0;
}
arch_initcall(arch_init_ftrace_syscalls);
#endif
@@ -28,9 +28,20 @@
 */
#define ARCH_PERFMON_EVENT_MASK 0xffff

/*
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define ARCH_PERFMON_EVENT_FILTER_MASK 0xff840000

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
	(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

@@ -77,6 +77,18 @@ struct cpu_hw_events {
	struct debug_store *ds;
};

struct event_constraint {
	unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int code;
};

#define EVENT_CONSTRAINT(c, m) { .code = (c), .idxmsk[0] = (m) }
#define EVENT_CONSTRAINT_END { .code = 0, .idxmsk[0] = 0 }

#define for_each_event_constraint(e, c) \
	for ((e) = (c); (e)->idxmsk[0]; (e)++)
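Read together, an entry built with EVENT_CONSTRAINT(code, mask) says "the event
with this select code may only be scheduled on the counters whose bits are set
in mask". A rough illustration (the table name below is hypothetical; the real
constraint tables follow in this diff):

	static const struct event_constraint example_constraints[] =
	{
		EVENT_CONSTRAINT(0xc1, 0x1),	/* event 0xc1 -> counter 0 only */
		EVENT_CONSTRAINT(0x12, 0x2),	/* event 0x12 -> counter 1 only */
		EVENT_CONSTRAINT_END
	};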


/*
 * struct x86_pmu - generic x86 pmu
 */
@@ -102,6 +114,8 @@ struct x86_pmu {
	u64 intel_ctrl;
	void (*enable_bts)(u64 config);
	void (*disable_bts)(void);
	int (*get_event_idx)(struct cpu_hw_events *cpuc,
			     struct hw_perf_event *hwc);
};

static struct x86_pmu x86_pmu __read_mostly;
@@ -110,6 +124,8 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};

static const struct event_constraint *event_constraints;

/*
 * Not sure about some of these
 */
@@ -155,6 +171,16 @@ static u64 p6_pmu_raw_event(u64 hw_event)
	return hw_event & P6_EVNTSEL_MASK;
}

static const struct event_constraint intel_p6_event_constraints[] =
{
	EVENT_CONSTRAINT(0xc1, 0x1),	/* FLOPS */
	EVENT_CONSTRAINT(0x10, 0x1),	/* FP_COMP_OPS_EXE */
	EVENT_CONSTRAINT(0x11, 0x1),	/* FP_ASSIST */
	EVENT_CONSTRAINT(0x12, 0x2),	/* MUL */
	EVENT_CONSTRAINT(0x13, 0x2),	/* DIV */
	EVENT_CONSTRAINT(0x14, 0x1),	/* CYCLES_DIV_BUSY */
	EVENT_CONSTRAINT_END
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
@@ -170,6 +196,35 @@ static const u64 intel_perfmon_event_map[] =
	[PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
};

static const struct event_constraint intel_core_event_constraints[] =
{
	EVENT_CONSTRAINT(0x10, 0x1),	/* FP_COMP_OPS_EXE */
	EVENT_CONSTRAINT(0x11, 0x2),	/* FP_ASSIST */
	EVENT_CONSTRAINT(0x12, 0x2),	/* MUL */
	EVENT_CONSTRAINT(0x13, 0x2),	/* DIV */
	EVENT_CONSTRAINT(0x14, 0x1),	/* CYCLES_DIV_BUSY */
	EVENT_CONSTRAINT(0x18, 0x1),	/* IDLE_DURING_DIV */
	EVENT_CONSTRAINT(0x19, 0x2),	/* DELAYED_BYPASS */
	EVENT_CONSTRAINT(0xa1, 0x1),	/* RS_UOPS_DISPATCH_CYCLES */
	EVENT_CONSTRAINT(0xcb, 0x1),	/* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static const struct event_constraint intel_nehalem_event_constraints[] =
{
	EVENT_CONSTRAINT(0x40, 0x3),	/* L1D_CACHE_LD */
	EVENT_CONSTRAINT(0x41, 0x3),	/* L1D_CACHE_ST */
	EVENT_CONSTRAINT(0x42, 0x3),	/* L1D_CACHE_LOCK */
	EVENT_CONSTRAINT(0x43, 0x3),	/* L1D_ALL_REF */
	EVENT_CONSTRAINT(0x4e, 0x3),	/* L1D_PREFETCH */
	EVENT_CONSTRAINT(0x4c, 0x3),	/* LOAD_HIT_PRE */
	EVENT_CONSTRAINT(0x51, 0x3),	/* L1D */
	EVENT_CONSTRAINT(0x52, 0x3),	/* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
	EVENT_CONSTRAINT(0x53, 0x3),	/* L1D_CACHE_LOCK_FB_HIT */
	EVENT_CONSTRAINT(0xc5, 0x3),	/* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
@@ -469,7 +524,7 @@ static u64 intel_pmu_raw_event(u64 hw_event)
#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
#define CORE_EVNTSEL_INV_MASK 0x00800000ULL
#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL
#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL

#define CORE_EVNTSEL_MASK \
	(CORE_EVNTSEL_EVENT_MASK | \
@@ -932,6 +987,8 @@ static int __hw_perf_event_init(struct perf_event *event)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	hwc->idx = -1;

	/*
	 * Count user and OS events unless requested not to.
	 */
@@ -1334,8 +1391,7 @@ static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
	x86_pmu_enable_event(hwc, idx);
}

static int
fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc)
static int fixed_mode_idx(struct hw_perf_event *hwc)
{
	unsigned int hw_event;

@@ -1349,6 +1405,12 @@ fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc)
	if (!x86_pmu.num_events_fixed)
		return -1;

	/*
	 * fixed counters do not take all possible filters
	 */
	if (hwc->config & ARCH_PERFMON_EVENT_FILTER_MASK)
		return -1;

	if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
		return X86_PMC_IDX_FIXED_INSTRUCTIONS;
	if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
@@ -1360,22 +1422,57 @@ fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc)
}

/*
 * Find a PMC slot for the freshly enabled / scheduled in event:
 * generic counter allocator: get next free counter
 */
static int x86_pmu_enable(struct perf_event *event)
static int
gen_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	idx = fixed_mode_idx(event, hwc);
	idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_events);
	return idx == x86_pmu.num_events ? -1 : idx;
}

/*
 * intel-specific counter allocator: check event constraints
 */
static int
intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
{
	const struct event_constraint *event_constraint;
	int i, code;

	if (!event_constraints)
		goto skip;

	code = hwc->config & CORE_EVNTSEL_EVENT_MASK;

	for_each_event_constraint(event_constraint, event_constraints) {
		if (code == event_constraint->code) {
			for_each_bit(i, event_constraint->idxmsk, X86_PMC_IDX_MAX) {
				if (!test_and_set_bit(i, cpuc->used_mask))
					return i;
			}
			return -1;
		}
	}
skip:
	return gen_get_event_idx(cpuc, hwc);
}

static int
x86_schedule_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
{
	int idx;

	idx = fixed_mode_idx(hwc);
	if (idx == X86_PMC_IDX_FIXED_BTS) {
		/* BTS is already occupied. */
		if (test_and_set_bit(idx, cpuc->used_mask))
			return -EAGAIN;

		hwc->config_base = 0;
		hwc->event_base = 0;
		hwc->event_base = 0;
		hwc->idx = idx;
	} else if (idx >= 0) {
		/*
@@ -1396,20 +1493,35 @@ static int x86_pmu_enable(struct perf_event *event)
	} else {
		idx = hwc->idx;
		/* Try to get the previous generic event again */
		if (test_and_set_bit(idx, cpuc->used_mask)) {
		if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) {
try_generic:
			idx = find_first_zero_bit(cpuc->used_mask,
						  x86_pmu.num_events);
			if (idx == x86_pmu.num_events)
			idx = x86_pmu.get_event_idx(cpuc, hwc);
			if (idx == -1)
				return -EAGAIN;

			set_bit(idx, cpuc->used_mask);
			hwc->idx = idx;
		}
		hwc->config_base = x86_pmu.eventsel;
		hwc->event_base = x86_pmu.perfctr;
		hwc->config_base = x86_pmu.eventsel;
		hwc->event_base = x86_pmu.perfctr;
	}

	return idx;
}

/*
 * Find a PMC slot for the freshly enabled / scheduled in event:
 */
static int x86_pmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	idx = x86_schedule_event(cpuc, hwc);
	if (idx < 0)
		return idx;

	perf_events_lapic_init();

	x86_pmu.disable(hwc, idx);
@@ -1877,6 +1989,7 @@ static struct x86_pmu p6_pmu = {
	 */
	.event_bits = 32,
	.event_mask = (1ULL << 32) - 1,
	.get_event_idx = intel_get_event_idx,
};

static struct x86_pmu intel_pmu = {
@@ -1900,6 +2013,7 @@ static struct x86_pmu intel_pmu = {
	.max_period = (1ULL << 31) - 1,
	.enable_bts = intel_pmu_enable_bts,
	.disable_bts = intel_pmu_disable_bts,
	.get_event_idx = intel_get_event_idx,
};

static struct x86_pmu amd_pmu = {
@@ -1920,6 +2034,7 @@ static struct x86_pmu amd_pmu = {
	.apic = 1,
	/* use highest bit to detect overflow */
	.max_period = (1ULL << 47) - 1,
	.get_event_idx = gen_get_event_idx,
};

static int p6_pmu_init(void)
@@ -1932,10 +2047,12 @@ static int p6_pmu_init(void)
	case 7:
	case 8:
	case 11: /* Pentium III */
		event_constraints = intel_p6_event_constraints;
		break;
	case 9:
	case 13:
		/* Pentium M */
		event_constraints = intel_p6_event_constraints;
		break;
	default:
		pr_cont("unsupported p6 CPU model %d ",
@@ -2007,12 +2124,14 @@ static int intel_pmu_init(void)
		       sizeof(hw_cache_event_ids));

		pr_cont("Core2 events, ");
		event_constraints = intel_core_event_constraints;
		break;
	default:
	case 26:
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		event_constraints = intel_nehalem_event_constraints;
		pr_cont("Nehalem/Corei7 events, ");
		break;
	case 28:
@@ -2105,11 +2224,47 @@ static const struct pmu pmu = {
	.unthrottle = x86_pmu_unthrottle,
};

static int
validate_event(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event fake_event = event->hw;

	if (event->pmu != &pmu)
		return 0;

	return x86_schedule_event(cpuc, &fake_event);
}

static int validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_pmu;

	memset(&fake_pmu, 0, sizeof(fake_pmu));

	if (!validate_event(&fake_pmu, leader))
		return -ENOSPC;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -ENOSPC;
	}

	if (!validate_event(&fake_pmu, event))
		return -ENOSPC;

	return 0;
}

const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	int err;

	err = __hw_perf_event_init(event);
	if (!err) {
		if (event->group_leader != event)
			err = validate_group(event);
	}
	if (err) {
		if (event->destroy)
			event->destroy(event);
@@ -1209,17 +1209,14 @@ END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
	pushl $0
	pushl %eax
	pushl %ecx
	pushl %edx
	movl %ebp, %eax
	call ftrace_return_to_handler
	movl %eax, 0xc(%esp)
	movl %eax, %ecx
	popl %edx
	popl %ecx
	popl %eax
	ret
	jmp *%ecx
#endif

.section .rodata,"a"
@@ -155,11 +155,11 @@ GLOBAL(return_to_handler)

	call ftrace_return_to_handler

	movq %rax, 16(%rsp)
	movq %rax, %rdi
	movq 8(%rsp), %rdx
	movq (%rsp), %rax
	addq $16, %rsp
	retq
	addq $24, %rsp
	jmp *%rdi
#endif


@@ -9,6 +9,8 @@
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
@@ -336,15 +338,15 @@ int __init ftrace_dyn_arch_init(void *data)

	switch (faulted) {
	case 0:
		pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
		pr_info("converting mcount calls to 0f 1f 44 00 00\n");
		memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
		break;
	case 1:
		pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
		pr_info("converting mcount calls to 66 66 66 66 90\n");
		memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
		break;
	case 2:
		pr_info("ftrace: converting mcount calls to jmp . + 5\n");
		pr_info("converting mcount calls to jmp . + 5\n");
		memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
		break;
	}
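The pr_fmt() definition added at the top of the file is why the explicit
"ftrace: " prefixes can be dropped from the messages above: every pr_*() call
is prefixed automatically. Roughly, the preprocessor turns

	/* With: #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt */
	pr_info("converting mcount calls to jmp . + 5\n");

into

	printk(KERN_INFO KBUILD_MODNAME ": " "converting mcount calls to jmp . + 5\n");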

@@ -468,82 +470,10 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,

#ifdef CONFIG_FTRACE_SYSCALLS

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];
extern unsigned long *sys_call_table;

static struct syscall_metadata **syscalls_metadata;

static struct syscall_metadata *find_syscall_meta(unsigned long *syscall)
unsigned long __init arch_syscall_addr(int nr)
{
	struct syscall_metadata *start;
	struct syscall_metadata *stop;
	char str[KSYM_SYMBOL_LEN];


	start = (struct syscall_metadata *)__start_syscalls_metadata;
	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
	kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str);

	for ( ; start < stop; start++) {
		if (start->name && !strcmp(start->name, str))
			return start;
	}
	return NULL;
	return (unsigned long)(&sys_call_table)[nr];
}

struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

int syscall_name_to_nr(char *name)
{
	int i;

	if (!syscalls_metadata)
		return -1;

	for (i = 0; i < NR_syscalls; i++) {
		if (syscalls_metadata[i]) {
			if (!strcmp(syscalls_metadata[i]->name, name))
				return i;
		}
	}
	return -1;
}

void set_syscall_enter_id(int num, int id)
{
	syscalls_metadata[num]->enter_id = id;
}

void set_syscall_exit_id(int num, int id)
{
	syscalls_metadata[num]->exit_id = id;
}

static int __init arch_init_ftrace_syscalls(void)
{
	int i;
	struct syscall_metadata *meta;
	unsigned long **psys_syscall_table = &sys_call_table;

	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
				    NR_syscalls, GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < NR_syscalls; i++) {
		meta = find_syscall_meta(psys_syscall_table[i]);
		syscalls_metadata[i] = meta;
	}
	return 0;
}
arch_initcall(arch_init_ftrace_syscalls);
#endif
@@ -1,12 +1,13 @@
/*
 * Written by Pekka Paalanen, 2008-2009 <pq@iki.fi>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/io.h>
#include <linux/mmiotrace.h>

#define MODULE_NAME "testmmiotrace"

static unsigned long mmio_address;
module_param(mmio_address, ulong, 0);
MODULE_PARM_DESC(mmio_address, " Start address of the mapping of 16 kB "
@@ -30,7 +31,7 @@ static unsigned v32(unsigned i)
static void do_write_test(void __iomem *p)
{
	unsigned int i;
	pr_info(MODULE_NAME ": write test.\n");
	pr_info("write test.\n");
	mmiotrace_printk("Write test.\n");

	for (i = 0; i < 256; i++)
@@ -47,7 +48,7 @@ static void do_read_test(void __iomem *p)
{
	unsigned int i;
	unsigned errs[3] = { 0 };
	pr_info(MODULE_NAME ": read test.\n");
	pr_info("read test.\n");
	mmiotrace_printk("Read test.\n");

	for (i = 0; i < 256; i++)
@@ -68,7 +69,7 @@ static void do_read_test(void __iomem *p)

static void do_read_far_test(void __iomem *p)
{
	pr_info(MODULE_NAME ": read far test.\n");
	pr_info("read far test.\n");
	mmiotrace_printk("Read far test.\n");

	ioread32(p + read_far);
@@ -78,7 +79,7 @@ static void do_test(unsigned long size)
{
	void __iomem *p = ioremap_nocache(mmio_address, size);
	if (!p) {
		pr_err(MODULE_NAME ": could not ioremap, aborting.\n");
		pr_err("could not ioremap, aborting.\n");
		return;
	}
	mmiotrace_printk("ioremap returned %p.\n", p);
@@ -94,24 +95,22 @@ static int __init init(void)
	unsigned long size = (read_far) ? (8 << 20) : (16 << 10);

	if (mmio_address == 0) {
		pr_err(MODULE_NAME ": you have to use the module argument "
		       "mmio_address.\n");
		pr_err(MODULE_NAME ": DO NOT LOAD THIS MODULE UNLESS"
		       " YOU REALLY KNOW WHAT YOU ARE DOING!\n");
		pr_err("you have to use the module argument mmio_address.\n");
		pr_err("DO NOT LOAD THIS MODULE UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!\n");
		return -ENXIO;
	}

	pr_warning(MODULE_NAME ": WARNING: mapping %lu kB @ 0x%08lx in PCI "
		   "address space, and writing 16 kB of rubbish in there.\n",
		   size >> 10, mmio_address);
	pr_warning("WARNING: mapping %lu kB @ 0x%08lx in PCI address space, "
		   "and writing 16 kB of rubbish in there.\n",
		   size >> 10, mmio_address);
	do_test(size);
	pr_info(MODULE_NAME ": All done.\n");
	pr_info("All done.\n");
	return 0;
}

static void __exit cleanup(void)
{
	pr_debug(MODULE_NAME ": unloaded.\n");
	pr_debug("unloaded.\n");
}

module_init(init);
@@ -144,7 +144,7 @@ extern char *trace_profile_buf_nmi;
#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */

extern void destroy_preds(struct ftrace_event_call *call);
extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
extern int filter_match_preds(struct event_filter *filter, void *rec);
extern int filter_current_check_discard(struct ring_buffer *buffer,
					struct ftrace_event_call *call,
					void *rec,
@@ -187,4 +187,13 @@ do { \
		__trace_printk(ip, fmt, ##args); \
} while (0)

#ifdef CONFIG_EVENT_PROFILE
struct perf_event;
extern int ftrace_profile_enable(int event_id);
extern void ftrace_profile_disable(int event_id);
extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
				     char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
#endif

#endif /* _LINUX_FTRACE_EVENT_H */
@@ -225,6 +225,7 @@ struct perf_counter_attr {
#define PERF_COUNTER_IOC_RESET _IO ('$', 3)
#define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64)
#define PERF_COUNTER_IOC_SET_OUTPUT _IO ('$', 5)
#define PERF_COUNTER_IOC_SET_FILTER _IOW('$', 6, char *)

enum perf_counter_ioc_flags {
	PERF_IOC_FLAG_GROUP = 1U << 0,
@@ -221,6 +221,7 @@ struct perf_event_attr {
#define PERF_EVENT_IOC_RESET _IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, u64)
#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)

enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP = 1U << 0,

@@ -633,7 +634,12 @@ struct perf_event {

	struct pid_namespace *ns;
	u64 id;

#ifdef CONFIG_EVENT_PROFILE
	struct event_filter *filter;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

/**
|
||||
return 0;
|
||||
}
|
||||
|
||||
extern void __lockfunc lock_kernel(void) __acquires(kernel_lock);
|
||||
extern void __lockfunc unlock_kernel(void) __releases(kernel_lock);
|
||||
extern void __lockfunc
|
||||
_lock_kernel(const char *func, const char *file, int line)
|
||||
__acquires(kernel_lock);
|
||||
|
||||
extern void __lockfunc
|
||||
_unlock_kernel(const char *func, const char *file, int line)
|
||||
__releases(kernel_lock);
|
||||
|
||||
#define lock_kernel() do { \
|
||||
_lock_kernel(__func__, __FILE__, __LINE__); \
|
||||
} while (0)
|
||||
|
||||
#define unlock_kernel() do { \
|
||||
_unlock_kernel(__func__, __FILE__, __LINE__); \
|
||||
} while (0)
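The wrappers above pass __func__, __FILE__ and __LINE__ down to the out-of-line
_lock_kernel()/_unlock_kernel() helpers, which lets them fire the bkl
tracepoints defined in the new include/trace/events/bkl.h later in this diff.
A rough sketch of how the lock side can use that information (simplified; the
real lib/kernel_lock.c implementation also handles the recursion depth):

	void __lockfunc _lock_kernel(const char *func, const char *file, int line)
	{
		trace_lock_kernel(func, file, line);	/* record the call site */
		__lock_kernel();			/* take the BKL itself */
	}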

/*
 * Various legacy drivers don't really need the BKL in a specific
@@ -41,8 +54,8 @@ static inline void cycle_kernel_lock(void)

#else

#define lock_kernel() do { } while(0)
#define unlock_kernel() do { } while(0)
#define lock_kernel()
#define unlock_kernel()
#define release_kernel_lock(task) do { } while(0)
#define cycle_kernel_lock() do { } while(0)
#define reacquire_kernel_lock(task) 0
include/trace/events/bkl.h (new file, 61 lines)
@@ -0,0 +1,61 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bkl

#if !defined(_TRACE_BKL_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BKL_H

#include <linux/tracepoint.h>

TRACE_EVENT(lock_kernel,

	TP_PROTO(const char *func, const char *file, int line),

	TP_ARGS(func, file, line),

	TP_STRUCT__entry(
		__field(	int,		depth			)
		__field_ext(	const char *,	func, FILTER_PTR_STRING	)
		__field_ext(	const char *,	file, FILTER_PTR_STRING	)
		__field(	int,		line			)
	),

	TP_fast_assign(
		/* We want to record the lock_depth after lock is acquired */
		__entry->depth = current->lock_depth + 1;
		__entry->func = func;
		__entry->file = file;
		__entry->line = line;
	),

	TP_printk("depth=%d file:line=%s:%d func=%s()", __entry->depth,
		  __entry->file, __entry->line, __entry->func)
);

TRACE_EVENT(unlock_kernel,

	TP_PROTO(const char *func, const char *file, int line),

	TP_ARGS(func, file, line),

	TP_STRUCT__entry(
		__field(int,		depth	)
		__field(const char *,	func	)
		__field(const char *,	file	)
		__field(int,		line	)
	),

	TP_fast_assign(
		__entry->depth = current->lock_depth;
		__entry->func = func;
		__entry->file = file;
		__entry->line = line;
	),

	TP_printk("depth=%d file:line=%s:%d func=%s()", __entry->depth,
		  __entry->file, __entry->line, __entry->func)
);

#endif /* _TRACE_BKL_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
@@ -48,7 +48,7 @@ TRACE_EVENT(irq_handler_entry,
		__assign_str(name, action->name);
	),

	TP_printk("irq=%d handler=%s", __entry->irq, __get_str(name))
	TP_printk("irq=%d name=%s", __entry->irq, __get_str(name))
);

/**
@@ -78,7 +78,7 @@ TRACE_EVENT(irq_handler_exit,
		__entry->ret = ret;
	),

	TP_printk("irq=%d return=%s",
	TP_printk("irq=%d ret=%s",
		  __entry->irq, __entry->ret ? "handled" : "unhandled")
);

@@ -107,7 +107,7 @@ TRACE_EVENT(softirq_entry,
		__entry->vec = (int)(h - vec);
	),

	TP_printk("softirq=%d action=%s", __entry->vec,
	TP_printk("vec=%d [action=%s]", __entry->vec,
		  show_softirq_name(__entry->vec))
);

@@ -136,7 +136,7 @@ TRACE_EVENT(softirq_exit,
		__entry->vec = (int)(h - vec);
	),

	TP_printk("softirq=%d action=%s", __entry->vec,
	TP_printk("vec=%d [action=%s]", __entry->vec,
		  show_softirq_name(__entry->vec))
);

@@ -16,8 +16,6 @@ enum {
};
#endif



TRACE_EVENT(power_start,

	TP_PROTO(unsigned int type, unsigned int state),
@@ -26,7 +26,7 @@ TRACE_EVENT(sched_kthread_stop,
		__entry->pid = t->pid;
	),

	TP_printk("task %s:%d", __entry->comm, __entry->pid)
	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);

/*
@@ -46,7 +46,7 @@ TRACE_EVENT(sched_kthread_stop_ret,
		__entry->ret = ret;
	),

	TP_printk("ret %d", __entry->ret)
	TP_printk("ret=%d", __entry->ret)
);

/*
@@ -73,7 +73,7 @@ TRACE_EVENT(sched_wait_task,
		__entry->prio = p->prio;
	),

	TP_printk("task %s:%d [%d]",
	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

@@ -94,7 +94,7 @@ TRACE_EVENT(sched_wakeup,
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	success			)
		__field(	int,	cpu			)
		__field(	int,	target_cpu		)
	),

	TP_fast_assign(
@@ -102,12 +102,12 @@ TRACE_EVENT(sched_wakeup,
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->success	= success;
		__entry->cpu		= task_cpu(p);
		__entry->target_cpu	= task_cpu(p);
	),

	TP_printk("task %s:%d [%d] success=%d [%03d]",
	TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->success, __entry->cpu)
		  __entry->success, __entry->target_cpu)
);

/*
@@ -127,7 +127,7 @@ TRACE_EVENT(sched_wakeup_new,
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	success			)
		__field(	int,	cpu			)
		__field(	int,	target_cpu		)
	),

	TP_fast_assign(
@@ -135,12 +135,12 @@ TRACE_EVENT(sched_wakeup_new,
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->success	= success;
		__entry->cpu		= task_cpu(p);
		__entry->target_cpu	= task_cpu(p);
	),

	TP_printk("task %s:%d [%d] success=%d [%03d]",
	TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->success, __entry->cpu)
		  __entry->success, __entry->target_cpu)
);

/*
@@ -176,7 +176,7 @@ TRACE_EVENT(sched_switch,
		__entry->next_prio	= next->prio;
	),

	TP_printk("task %s:%d [%d] (%s) ==> %s:%d [%d]",
	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
		__entry->prev_state ?
		  __print_flags(__entry->prev_state, "|",
@@ -211,7 +211,7 @@ TRACE_EVENT(sched_migrate_task,
		__entry->dest_cpu	= dest_cpu;
	),

	TP_printk("task %s:%d [%d] from: %d to: %d",
	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);
@@ -237,7 +237,7 @@ TRACE_EVENT(sched_process_free,
		__entry->prio		= p->prio;
	),

	TP_printk("task %s:%d [%d]",
	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

@@ -262,7 +262,7 @@ TRACE_EVENT(sched_process_exit,
		__entry->prio		= p->prio;
	),

	TP_printk("task %s:%d [%d]",
	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

@@ -287,7 +287,7 @@ TRACE_EVENT(sched_process_wait,
		__entry->prio		= current->prio;
	),

	TP_printk("task %s:%d [%d]",
	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

@@ -314,7 +314,7 @@ TRACE_EVENT(sched_process_fork,
		__entry->child_pid	= child->pid;
	),

	TP_printk("parent %s:%d child %s:%d",
	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		__entry->parent_comm, __entry->parent_pid,
		__entry->child_comm, __entry->child_pid)
);
@@ -340,7 +340,7 @@ TRACE_EVENT(sched_signal_send,
		__entry->sig	= sig;
	),

	TP_printk("sig: %d task %s:%d",
	TP_printk("sig=%d comm=%s pid=%d",
		  __entry->sig, __entry->comm, __entry->pid)
);

@@ -374,7 +374,7 @@ TRACE_EVENT(sched_stat_wait,
		__perf_count(delay);
	),

	TP_printk("task: %s:%d wait: %Lu [ns]",
	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->delay)
);
@@ -406,7 +406,7 @@ TRACE_EVENT(sched_stat_runtime,
		__perf_count(runtime);
	),

	TP_printk("task: %s:%d runtime: %Lu [ns], vruntime: %Lu [ns]",
	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->runtime,
		  (unsigned long long)__entry->vruntime)
@@ -437,7 +437,7 @@ TRACE_EVENT(sched_stat_sleep,
		__perf_count(delay);
	),

	TP_printk("task: %s:%d sleep: %Lu [ns]",
	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->delay)
);
@@ -467,7 +467,7 @@ TRACE_EVENT(sched_stat_iowait,
		__perf_count(delay);
	),

	TP_printk("task: %s:%d iowait: %Lu [ns]",
	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->delay)
);
@@ -26,7 +26,7 @@ TRACE_EVENT(timer_init,
		__entry->timer	= timer;
	),

	TP_printk("timer %p", __entry->timer)
	TP_printk("timer=%p", __entry->timer)
);

/**
@@ -54,7 +54,7 @@ TRACE_EVENT(timer_start,
		__entry->now		= jiffies;
	),

	TP_printk("timer %p: func %pf, expires %lu, timeout %ld",
	TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld]",
		  __entry->timer, __entry->function, __entry->expires,
		  (long)__entry->expires - __entry->now)
);
@@ -81,7 +81,7 @@ TRACE_EVENT(timer_expire_entry,
		__entry->now	= jiffies;
	),

	TP_printk("timer %p: now %lu", __entry->timer, __entry->now)
	TP_printk("timer=%p now=%lu", __entry->timer, __entry->now)
);

/**
@@ -108,7 +108,7 @@ TRACE_EVENT(timer_expire_exit,
		__entry->timer	= timer;
	),

	TP_printk("timer %p", __entry->timer)
	TP_printk("timer=%p", __entry->timer)
);

/**
@@ -129,7 +129,7 @@ TRACE_EVENT(timer_cancel,
		__entry->timer	= timer;
	),

	TP_printk("timer %p", __entry->timer)
	TP_printk("timer=%p", __entry->timer)
);

/**
@@ -140,24 +140,24 @@ TRACE_EVENT(timer_cancel,
 */
TRACE_EVENT(hrtimer_init,

	TP_PROTO(struct hrtimer *timer, clockid_t clockid,
	TP_PROTO(struct hrtimer *hrtimer, clockid_t clockid,
		 enum hrtimer_mode mode),

	TP_ARGS(timer, clockid, mode),
	TP_ARGS(hrtimer, clockid, mode),

	TP_STRUCT__entry(
		__field( void *,		timer		)
		__field( void *,		hrtimer		)
		__field( clockid_t,		clockid		)
		__field( enum hrtimer_mode,	mode		)
	),

	TP_fast_assign(
		__entry->timer		= timer;
		__entry->hrtimer	= hrtimer;
		__entry->clockid	= clockid;
		__entry->mode		= mode;
	),

	TP_printk("hrtimer %p, clockid %s, mode %s", __entry->timer,
	TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
		  __entry->clockid == CLOCK_REALTIME ?
			"CLOCK_REALTIME" : "CLOCK_MONOTONIC",
		  __entry->mode == HRTIMER_MODE_ABS ?
@@ -170,26 +170,26 @@ TRACE_EVENT(hrtimer_init,
 */
TRACE_EVENT(hrtimer_start,

	TP_PROTO(struct hrtimer *timer),
	TP_PROTO(struct hrtimer *hrtimer),

	TP_ARGS(timer),
	TP_ARGS(hrtimer),

	TP_STRUCT__entry(
		__field( void *,	timer		)
		__field( void *,	hrtimer		)
		__field( void *,	function	)
		__field( s64,		expires		)
		__field( s64,		softexpires	)
	),

	TP_fast_assign(
		__entry->timer		= timer;
		__entry->function	= timer->function;
		__entry->expires	= hrtimer_get_expires(timer).tv64;
		__entry->softexpires	= hrtimer_get_softexpires(timer).tv64;
		__entry->hrtimer	= hrtimer;
		__entry->function	= hrtimer->function;
		__entry->expires	= hrtimer_get_expires(hrtimer).tv64;
		__entry->softexpires	= hrtimer_get_softexpires(hrtimer).tv64;
	),

	TP_printk("hrtimer %p, func %pf, expires %llu, softexpires %llu",
		  __entry->timer, __entry->function,
	TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu",
		  __entry->hrtimer, __entry->function,
		  (unsigned long long)ktime_to_ns((ktime_t) {
				  .tv64 = __entry->expires }),
		  (unsigned long long)ktime_to_ns((ktime_t) {
@@ -206,23 +206,22 @@ TRACE_EVENT(hrtimer_start,
 */
TRACE_EVENT(hrtimer_expire_entry,

	TP_PROTO(struct hrtimer *timer, ktime_t *now),
	TP_PROTO(struct hrtimer *hrtimer, ktime_t *now),

	TP_ARGS(timer, now),
	TP_ARGS(hrtimer, now),

	TP_STRUCT__entry(
		__field( void *,	timer	)
		__field( void *,	hrtimer	)
		__field( s64,		now	)
	),

	TP_fast_assign(
		__entry->timer	= timer;
		__entry->now	= now->tv64;
		__entry->hrtimer	= hrtimer;
		__entry->now		= now->tv64;
	),

	TP_printk("hrtimer %p, now %llu", __entry->timer,
		  (unsigned long long)ktime_to_ns((ktime_t) {
				  .tv64 = __entry->now }))
	TP_printk("hrtimer=%p now=%llu", __entry->hrtimer,
		  (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now }))
);

/**
@@ -234,40 +233,40 @@ TRACE_EVENT(hrtimer_expire_entry,
 */
TRACE_EVENT(hrtimer_expire_exit,

	TP_PROTO(struct hrtimer *timer),
	TP_PROTO(struct hrtimer *hrtimer),

	TP_ARGS(timer),
	TP_ARGS(hrtimer),

	TP_STRUCT__entry(
		__field( void *,	timer	)
		__field( void *,	hrtimer	)
	),

	TP_fast_assign(
		__entry->timer	= timer;
		__entry->hrtimer	= hrtimer;
	),

	TP_printk("hrtimer %p", __entry->timer)
	TP_printk("hrtimer=%p", __entry->hrtimer)
);

/**
 * hrtimer_cancel - called when the hrtimer is canceled
 * @timer: pointer to struct hrtimer
 * @hrtimer: pointer to struct hrtimer
 */
TRACE_EVENT(hrtimer_cancel,

	TP_PROTO(struct hrtimer *timer),
	TP_PROTO(struct hrtimer *hrtimer),

	TP_ARGS(timer),
	TP_ARGS(hrtimer),

	TP_STRUCT__entry(
		__field( void *,	timer	)
		__field( void *,	hrtimer	)
	),

	TP_fast_assign(
		__entry->timer	= timer;
		__entry->hrtimer	= hrtimer;
	),

	TP_printk("hrtimer %p", __entry->timer)
	TP_printk("hrtimer=%p", __entry->hrtimer)
);

/**
@@ -302,7 +301,7 @@ TRACE_EVENT(itimer_state,
		__entry->interval_usec	= value->it_interval.tv_usec;
	),

	TP_printk("which %d, expires %lu, it_value %lu.%lu, it_interval %lu.%lu",
	TP_printk("which=%d expires=%lu it_value=%lu.%lu it_interval=%lu.%lu",
		  __entry->which, __entry->expires,
		  __entry->value_sec, __entry->value_usec,
		  __entry->interval_sec, __entry->interval_usec)
@@ -332,7 +331,7 @@ TRACE_EVENT(itimer_expire,
		__entry->pid	= pid_nr(pid);
	),

	TP_printk("which %d, pid %d, now %lu", __entry->which,
	TP_printk("which=%d pid=%d now=%lu", __entry->which,
		  (int) __entry->pid, __entry->now)
);

@@ -120,9 +120,10 @@
#undef __field
#define __field(type, item) \
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
			       "offset:%u;\tsize:%u;\n", \
			       "offset:%u;\tsize:%u;\tsigned:%u;\n", \
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item)); \
			       (unsigned int)sizeof(field.item), \
			       (unsigned int)is_signed_type(type)); \
	if (!ret) \
		return 0;

@@ -132,19 +133,21 @@
#undef __array
#define __array(type, item, len) \
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
			       "offset:%u;\tsize:%u;\n", \
			       "offset:%u;\tsize:%u;\tsigned:%u;\n", \
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item)); \
			       (unsigned int)sizeof(field.item), \
			       (unsigned int)is_signed_type(type)); \
	if (!ret) \
		return 0;

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
			       "offset:%u;\tsize:%u;\n", \
			       "offset:%u;\tsize:%u;\tsigned:%u;\n", \
			       (unsigned int)offsetof(typeof(field), \
					__data_loc_##item), \
			       (unsigned int)sizeof(field.__data_loc_##item)); \
			       (unsigned int)sizeof(field.__data_loc_##item), \
			       (unsigned int)is_signed_type(type)); \
	if (!ret) \
		return 0;
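The new "signed:%u" column relies on an is_signed_type() helper. A compile-time
check of this form is the usual way to implement it (shown here as a sketch,
not necessarily the exact kernel definition):

	/* Evaluates to 1 for signed integer types, 0 for unsigned ones. */
	#define is_signed_type(type)	(((type)(-1)) < (type)0)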
@@ -33,7 +33,7 @@ struct syscall_metadata {
};

#ifdef CONFIG_FTRACE_SYSCALLS
extern struct syscall_metadata *syscall_nr_to_meta(int nr);
extern unsigned long arch_syscall_addr(int nr);
extern int syscall_name_to_nr(char *name);
void set_syscall_enter_id(int num, int id);
void set_syscall_exit_id(int num, int id);
@@ -28,6 +28,7 @@
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>

#include <asm/irq_regs.h>

@@ -1355,7 +1356,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
	u64 interrupts, freq;

	spin_lock(&ctx->lock);
	list_for_each_entry(event, &ctx->group_list, group_entry) {
	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (event->state != PERF_EVENT_STATE_ACTIVE)
			continue;

@@ -1658,6 +1659,8 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
	return ERR_PTR(err);
}

static void perf_event_free_filter(struct perf_event *event);

static void free_event_rcu(struct rcu_head *head)
{
	struct perf_event *event;
@@ -1665,6 +1668,7 @@ static void free_event_rcu(struct rcu_head *head)
	event = container_of(head, struct perf_event, rcu_head);
	if (event->ns)
		put_pid_ns(event->ns);
	perf_event_free_filter(event);
	kfree(event);
}

@@ -1974,7 +1978,8 @@ unlock:
	return ret;
}

int perf_event_set_output(struct perf_event *event, int output_fd);
static int perf_event_set_output(struct perf_event *event, int output_fd);
static int perf_event_set_filter(struct perf_event *event, void __user *arg);

static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
@@ -2002,6 +2007,9 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	case PERF_EVENT_IOC_SET_OUTPUT:
		return perf_event_set_output(event, arg);

	case PERF_EVENT_IOC_SET_FILTER:
		return perf_event_set_filter(event, (void __user *)arg);

	default:
		return -ENOTTY;
	}
@@ -3806,9 +3814,14 @@ static int perf_swevent_is_counting(struct perf_event *event)
	return 1;
}

static int perf_tp_event_match(struct perf_event *event,
				struct perf_sample_data *data);

static int perf_swevent_match(struct perf_event *event,
				enum perf_type_id type,
				u32 event_id, struct pt_regs *regs)
				u32 event_id,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	if (!perf_swevent_is_counting(event))
		return 0;
@@ -3826,6 +3839,10 @@ static int perf_swevent_match(struct perf_event *event,
			return 0;
	}

	if (event->attr.type == PERF_TYPE_TRACEPOINT &&
	    !perf_tp_event_match(event, data))
		return 0;

	return 1;
}

@@ -3842,7 +3859,7 @@ static void perf_swevent_ctx_event(struct perf_event_context *ctx,

	rcu_read_lock();
	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_swevent_match(event, type, event_id, regs))
		if (perf_swevent_match(event, type, event_id, data, regs))
			perf_swevent_add(event, nr, nmi, data, regs);
	}
	rcu_read_unlock();
@@ -4086,6 +4103,7 @@ static const struct pmu perf_ops_task_clock = {
};

#ifdef CONFIG_EVENT_PROFILE

void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
		   int entry_size)
{
@@ -4109,8 +4127,15 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
}
EXPORT_SYMBOL_GPL(perf_tp_event);

extern int ftrace_profile_enable(int);
extern void ftrace_profile_disable(int);
static int perf_tp_event_match(struct perf_event *event,
				struct perf_sample_data *data)
{
	void *record = data->raw->data;

	if (likely(!event->filter) || filter_match_preds(event->filter, record))
		return 1;
	return 0;
}

static void tp_perf_event_destroy(struct perf_event *event)
{
@@ -4135,12 +4160,53 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)

	return &perf_ops_generic;
}

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	char *filter_str;
	int ret;

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;

	filter_str = strndup_user(arg, PAGE_SIZE);
	if (IS_ERR(filter_str))
		return PTR_ERR(filter_str);

	ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);

	kfree(filter_str);
	return ret;
}
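With the new PERF_EVENT_IOC_SET_FILTER ioctl wired up above, user space can
attach a filter string to a tracepoint event. A minimal sketch of the calling
side (the filter expression and helper name are only illustrative):

	#include <sys/ioctl.h>
	#include <linux/perf_event.h>

	/* fd is a perf event fd opened with attr.type == PERF_TYPE_TRACEPOINT */
	static int set_tracepoint_filter(int fd)
	{
		return ioctl(fd, PERF_EVENT_IOC_SET_FILTER, "common_pid != 0");
	}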

static void perf_event_free_filter(struct perf_event *event)
{
	ftrace_profile_free_filter(event);
}

#else

static int perf_tp_event_match(struct perf_event *event,
				struct perf_sample_data *data)
{
	return 1;
}

static const struct pmu *tp_perf_event_init(struct perf_event *event)
{
	return NULL;
}
#endif

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	return -ENOENT;
}

static void perf_event_free_filter(struct perf_event *event)
{
}

#endif /* CONFIG_EVENT_PROFILE */

atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];

@@ -4394,7 +4460,7 @@ err_size:
	goto out;
}

int perf_event_set_output(struct perf_event *event, int output_fd)
static int perf_event_set_output(struct perf_event *event, int output_fd)
{
	struct perf_event *output_event = NULL;
	struct file *output_file = NULL;
@@ -60,6 +60,13 @@ static int last_ftrace_enabled;
/* Quick disabling of function tracer. */
int function_trace_stop;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
	struct list_head list;
	struct pid *pid;
};

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
@@ -78,6 +85,10 @@ ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
#endif

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;
@@ -155,7 +166,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
	else
		func = ftrace_list_func;

	if (ftrace_pid_trace) {
	if (!list_empty(&ftrace_pids)) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	}
@@ -203,7 +214,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
	if (ftrace_list->next == &ftrace_list_end) {
		ftrace_func_t func = ftrace_list->func;

		if (ftrace_pid_trace) {
		if (!list_empty(&ftrace_pids)) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}
@@ -231,7 +242,7 @@ static void ftrace_update_pid_func(void)
	func = __ftrace_trace_function;
#endif

	if (ftrace_pid_trace) {
	if (!list_empty(&ftrace_pids)) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
@@ -821,8 +832,6 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
}
#endif /* CONFIG_FUNCTION_PROFILER */

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_DYNAMIC_FTRACE
@@ -1261,12 +1270,34 @@ static int ftrace_update_code(struct module *mod)
		ftrace_new_addrs = p->newlist;
		p->flags = 0L;

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
		/*
		 * Do the initial record convertion from mcount jump
		 * to the NOP instructions.
		 */
		if (!ftrace_code_disable(mod, p)) {
			ftrace_free_rec(p);
			continue;
		}

		p->flags |= FTRACE_FL_CONVERTED;
		ftrace_update_cnt++;

		/*
		 * If the tracing is enabled, go ahead and enable the record.
		 *
		 * The reason not to enable the record immediatelly is the
		 * inherent check of ftrace_make_nop/ftrace_make_call for
		 * correct previous instructions. Making first the NOP
		 * conversion puts the module to the correct state, thus
		 * passing the ftrace_make_call check.
		 */
		if (ftrace_start_up) {
			int failed = __ftrace_replace_code(p, 1);
			if (failed) {
				ftrace_bug(failed, p->ip);
				ftrace_free_rec(p);
			}
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
@@ -1656,60 +1687,6 @@ ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

/*
 * (static function - no need for kernel doc)
 *
 * Pass in a buffer containing a glob and this function will
 * set search to point to the search part of the buffer and
 * return the type of search it is (see enum above).
 * This does modify buff.
 *
 * Returns enum type.
 *  search returns the pointer to use for comparison.
 *  not returns 1 if buff started with a '!'
 *     0 otherwise.
 */
static int
ftrace_setup_glob(char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				*search = buff + 1;
				type = MATCH_END_ONLY;
			} else {
				if (type == MATCH_END_ONLY)
					type = MATCH_MIDDLE_ONLY;
				else
					type = MATCH_FRONT_ONLY;
				buff[i] = 0;
				break;
			}
		}
	}

	return type;
}
||||
|
||||
static int ftrace_match(char *str, char *regex, int len, int type)
|
||||
{
|
||||
int matched = 0;
|
||||
@ -1758,7 +1735,7 @@ static void ftrace_match_records(char *buff, int len, int enable)
|
||||
int not;
|
||||
|
||||
flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
|
||||
type = ftrace_setup_glob(buff, len, &search, ¬);
|
||||
type = filter_parse_regex(buff, len, &search, ¬);
|
||||
|
||||
search_len = strlen(search);
|
||||
|
||||
@ -1826,7 +1803,7 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable)
|
||||
}
|
||||
|
||||
if (strlen(buff)) {
|
||||
type = ftrace_setup_glob(buff, strlen(buff), &search, ¬);
|
||||
type = filter_parse_regex(buff, strlen(buff), &search, ¬);
|
||||
search_len = strlen(search);
|
||||
}
|
||||
|
||||
@ -1991,7 +1968,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
|
||||
int count = 0;
|
||||
char *search;
|
||||
|
||||
type = ftrace_setup_glob(glob, strlen(glob), &search, ¬);
|
||||
type = filter_parse_regex(glob, strlen(glob), &search, ¬);
|
||||
len = strlen(search);
|
||||
|
||||
/* we do not support '!' for function probes */
|
||||
@ -2068,7 +2045,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
|
||||
else if (glob) {
|
||||
int not;
|
||||
|
||||
type = ftrace_setup_glob(glob, strlen(glob), &search, ¬);
|
||||
type = filter_parse_regex(glob, strlen(glob), &search, ¬);
|
||||
len = strlen(search);
|
||||
|
||||
/* we do not support '!' for function probes */
|
||||
@ -2297,6 +2274,7 @@ void ftrace_set_notrace(unsigned char *buf, int len, int reset)
#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;

static int __init set_ftrace_notrace(char *str)
{
@ -2312,6 +2290,31 @@ static int __init set_ftrace_filter(char *str)
}
__setup("ftrace_filter=", set_ftrace_filter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int __init set_graph_function(char *str)
{
strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
return 1;
}
__setup("ftrace_graph_filter=", set_graph_function);

static void __init set_ftrace_early_graph(char *buf)
{
int ret;
char *func;

while (buf) {
func = strsep(&buf, ",");
/* we allow only one expression at a time */
ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
func);
if (ret)
printk(KERN_DEBUG "ftrace: function %s not "
"traceable\n", func);
}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static void __init set_ftrace_early_filter(char *buf, int enable)
{
char *func;
@ -2328,6 +2331,10 @@ static void __init set_ftrace_early_filters(void)
set_ftrace_early_filter(ftrace_filter_buf, 1);
if (ftrace_notrace_buf[0])
set_ftrace_early_filter(ftrace_notrace_buf, 0);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (ftrace_graph_buf[0])
set_ftrace_early_graph(ftrace_graph_buf);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
}
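/*
 * Illustration only, not part of this patch (the parameter values below are
 * made up): booting with
 *
 *   ftrace_filter=sched* ftrace_graph_filter=schedule,vfs_read
 *
 * leaves "sched*" in ftrace_filter_buf and "schedule,vfs_read" in
 * ftrace_graph_buf; set_ftrace_early_filters() then applies the first as an
 * early function filter and hands each comma-separated name from the second
 * to ftrace_set_func() for the function graph tracer.
 */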

static int
@ -2513,7 +2520,7 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
return -ENODEV;

/* decode regex */
type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
if (not)
return -EINVAL;

@ -2624,7 +2631,7 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ftrace_convert_nops(struct module *mod,
|
||||
static int ftrace_process_locs(struct module *mod,
|
||||
unsigned long *start,
|
||||
unsigned long *end)
|
||||
{
|
||||
@ -2684,7 +2691,7 @@ static void ftrace_init_module(struct module *mod,
|
||||
{
|
||||
if (ftrace_disabled || start == end)
|
||||
return;
|
||||
ftrace_convert_nops(mod, start, end);
|
||||
ftrace_process_locs(mod, start, end);
|
||||
}
|
||||
|
||||
static int ftrace_module_notify(struct notifier_block *self,
|
||||
@ -2745,7 +2752,7 @@ void __init ftrace_init(void)
|
||||
|
||||
last_ftrace_enabled = ftrace_enabled = 1;
|
||||
|
||||
ret = ftrace_convert_nops(NULL,
|
||||
ret = ftrace_process_locs(NULL,
|
||||
__start_mcount_loc,
|
||||
__stop_mcount_loc);
|
||||
|
||||
@ -2778,23 +2785,6 @@ static inline void ftrace_startup_enable(int command) { }
|
||||
# define ftrace_shutdown_sysctl() do { } while (0)
|
||||
#endif /* CONFIG_DYNAMIC_FTRACE */
|
||||
|
||||
static ssize_t
|
||||
ftrace_pid_read(struct file *file, char __user *ubuf,
|
||||
size_t cnt, loff_t *ppos)
|
||||
{
|
||||
char buf[64];
|
||||
int r;
|
||||
|
||||
if (ftrace_pid_trace == ftrace_swapper_pid)
|
||||
r = sprintf(buf, "swapper tasks\n");
|
||||
else if (ftrace_pid_trace)
|
||||
r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
|
||||
else
|
||||
r = sprintf(buf, "no pid\n");
|
||||
|
||||
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
|
||||
}
|
||||
|
||||
static void clear_ftrace_swapper(void)
|
||||
{
|
||||
struct task_struct *p;
|
||||
@ -2845,14 +2835,12 @@ static void set_ftrace_pid(struct pid *pid)
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
static void clear_ftrace_pid_task(struct pid **pid)
|
||||
static void clear_ftrace_pid_task(struct pid *pid)
|
||||
{
|
||||
if (*pid == ftrace_swapper_pid)
|
||||
if (pid == ftrace_swapper_pid)
|
||||
clear_ftrace_swapper();
|
||||
else
|
||||
clear_ftrace_pid(*pid);
|
||||
|
||||
*pid = NULL;
|
||||
clear_ftrace_pid(pid);
|
||||
}
|
||||
|
||||
static void set_ftrace_pid_task(struct pid *pid)
|
||||
@ -2863,11 +2851,140 @@ static void set_ftrace_pid_task(struct pid *pid)
|
||||
set_ftrace_pid(pid);
|
||||
}
|
||||
|
||||
static int ftrace_pid_add(int p)
|
||||
{
|
||||
struct pid *pid;
|
||||
struct ftrace_pid *fpid;
|
||||
int ret = -EINVAL;
|
||||
|
||||
mutex_lock(&ftrace_lock);
|
||||
|
||||
if (!p)
|
||||
pid = ftrace_swapper_pid;
|
||||
else
|
||||
pid = find_get_pid(p);
|
||||
|
||||
if (!pid)
|
||||
goto out;
|
||||
|
||||
ret = 0;
|
||||
|
||||
list_for_each_entry(fpid, &ftrace_pids, list)
|
||||
if (fpid->pid == pid)
|
||||
goto out_put;
|
||||
|
||||
ret = -ENOMEM;
|
||||
|
||||
fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
|
||||
if (!fpid)
|
||||
goto out_put;
|
||||
|
||||
list_add(&fpid->list, &ftrace_pids);
|
||||
fpid->pid = pid;
|
||||
|
||||
set_ftrace_pid_task(pid);
|
||||
|
||||
ftrace_update_pid_func();
|
||||
ftrace_startup_enable(0);
|
||||
|
||||
mutex_unlock(&ftrace_lock);
|
||||
return 0;
|
||||
|
||||
out_put:
|
||||
if (pid != ftrace_swapper_pid)
|
||||
put_pid(pid);
|
||||
|
||||
out:
|
||||
mutex_unlock(&ftrace_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void ftrace_pid_reset(void)
|
||||
{
|
||||
struct ftrace_pid *fpid, *safe;
|
||||
|
||||
mutex_lock(&ftrace_lock);
|
||||
list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
|
||||
struct pid *pid = fpid->pid;
|
||||
|
||||
clear_ftrace_pid_task(pid);
|
||||
|
||||
list_del(&fpid->list);
|
||||
kfree(fpid);
|
||||
}
|
||||
|
||||
ftrace_update_pid_func();
|
||||
ftrace_startup_enable(0);
|
||||
|
||||
mutex_unlock(&ftrace_lock);
|
||||
}
|
||||
|
||||
static void *fpid_start(struct seq_file *m, loff_t *pos)
|
||||
{
|
||||
mutex_lock(&ftrace_lock);
|
||||
|
||||
if (list_empty(&ftrace_pids) && (!*pos))
|
||||
return (void *) 1;
|
||||
|
||||
return seq_list_start(&ftrace_pids, *pos);
|
||||
}
|
||||
|
||||
static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
|
||||
{
|
||||
if (v == (void *)1)
|
||||
return NULL;
|
||||
|
||||
return seq_list_next(v, &ftrace_pids, pos);
|
||||
}
|
||||
|
||||
static void fpid_stop(struct seq_file *m, void *p)
|
||||
{
|
||||
mutex_unlock(&ftrace_lock);
|
||||
}
|
||||
|
||||
static int fpid_show(struct seq_file *m, void *v)
|
||||
{
|
||||
const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
|
||||
|
||||
if (v == (void *)1) {
|
||||
seq_printf(m, "no pid\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (fpid->pid == ftrace_swapper_pid)
|
||||
seq_printf(m, "swapper tasks\n");
|
||||
else
|
||||
seq_printf(m, "%u\n", pid_vnr(fpid->pid));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct seq_operations ftrace_pid_sops = {
|
||||
.start = fpid_start,
|
||||
.next = fpid_next,
|
||||
.stop = fpid_stop,
|
||||
.show = fpid_show,
|
||||
};
|
||||
|
||||
static int
|
||||
ftrace_pid_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if ((file->f_mode & FMODE_WRITE) &&
|
||||
(file->f_flags & O_TRUNC))
|
||||
ftrace_pid_reset();
|
||||
|
||||
if (file->f_mode & FMODE_READ)
|
||||
ret = seq_open(file, &ftrace_pid_sops);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
ftrace_pid_write(struct file *filp, const char __user *ubuf,
|
||||
size_t cnt, loff_t *ppos)
|
||||
{
|
||||
struct pid *pid;
|
||||
char buf[64];
|
||||
long val;
|
||||
int ret;
|
||||
@ -2880,57 +2997,38 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
|
||||
|
||||
buf[cnt] = 0;
|
||||
|
||||
/*
|
||||
* Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
|
||||
* to clean the filter quietly.
|
||||
*/
|
||||
strstrip(buf);
|
||||
if (strlen(buf) == 0)
|
||||
return 1;
|
||||
|
||||
ret = strict_strtol(buf, 10, &val);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
mutex_lock(&ftrace_lock);
|
||||
if (val < 0) {
|
||||
/* disable pid tracing */
|
||||
if (!ftrace_pid_trace)
|
||||
goto out;
|
||||
ret = ftrace_pid_add(val);
|
||||
|
||||
clear_ftrace_pid_task(&ftrace_pid_trace);
|
||||
return ret ? ret : cnt;
|
||||
}
|
||||
|
||||
} else {
|
||||
/* swapper task is special */
|
||||
if (!val) {
|
||||
pid = ftrace_swapper_pid;
|
||||
if (pid == ftrace_pid_trace)
|
||||
goto out;
|
||||
} else {
|
||||
pid = find_get_pid(val);
|
||||
static int
|
||||
ftrace_pid_release(struct inode *inode, struct file *file)
|
||||
{
|
||||
if (file->f_mode & FMODE_READ)
|
||||
seq_release(inode, file);
|
||||
|
||||
if (pid == ftrace_pid_trace) {
|
||||
put_pid(pid);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
if (ftrace_pid_trace)
|
||||
clear_ftrace_pid_task(&ftrace_pid_trace);
|
||||
|
||||
if (!pid)
|
||||
goto out;
|
||||
|
||||
ftrace_pid_trace = pid;
|
||||
|
||||
set_ftrace_pid_task(ftrace_pid_trace);
|
||||
}
|
||||
|
||||
/* update the function call */
|
||||
ftrace_update_pid_func();
|
||||
ftrace_startup_enable(0);
|
||||
|
||||
out:
|
||||
mutex_unlock(&ftrace_lock);
|
||||
|
||||
return cnt;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct file_operations ftrace_pid_fops = {
|
||||
.read = ftrace_pid_read,
|
||||
.write = ftrace_pid_write,
|
||||
.open = ftrace_pid_open,
|
||||
.write = ftrace_pid_write,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = ftrace_pid_release,
|
||||
};
|
||||
|
||||
static __init int ftrace_init_debugfs(void)
|
||||
|
@ -397,18 +397,21 @@ int ring_buffer_print_page_header(struct trace_seq *s)
int ret;

ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
"offset:0;\tsize:%u;\n",
(unsigned int)sizeof(field.time_stamp));
"offset:0;\tsize:%u;\tsigned:%u;\n",
(unsigned int)sizeof(field.time_stamp),
(unsigned int)is_signed_type(u64));

ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
"offset:%u;\tsize:%u;\n",
"offset:%u;\tsize:%u;\tsigned:%u;\n",
(unsigned int)offsetof(typeof(field), commit),
(unsigned int)sizeof(field.commit));
(unsigned int)sizeof(field.commit),
(unsigned int)is_signed_type(long));

ret = trace_seq_printf(s, "\tfield: char data;\t"
"offset:%u;\tsize:%u;\n",
"offset:%u;\tsize:%u;\tsigned:%u;\n",
(unsigned int)offsetof(typeof(field), data),
(unsigned int)BUF_PAGE_SIZE);
(unsigned int)BUF_PAGE_SIZE,
(unsigned int)is_signed_type(char));

return ret;
}
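/*
 * Illustration only, not part of this patch: with the added "signed"
 * attribute the first line of this header comes out roughly as
 *
 *   field: u64 timestamp;	offset:0;	size:8;	signed:0;
 *
 * since sizeof(u64) is 8 and is_signed_type(u64) evaluates to 0; the commit
 * and data lines gain the same "signed:%u;" suffix (their values depend on
 * the architecture's local_t and char signedness).
 */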
|
||||
|
@ -129,7 +129,7 @@ static int tracing_set_tracer(const char *buf);
|
||||
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
|
||||
static char *default_bootup_tracer;
|
||||
|
||||
static int __init set_ftrace(char *str)
|
||||
static int __init set_cmdline_ftrace(char *str)
|
||||
{
|
||||
strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
|
||||
default_bootup_tracer = bootup_tracer_buf;
|
||||
@ -137,7 +137,7 @@ static int __init set_ftrace(char *str)
|
||||
ring_buffer_expanded = 1;
|
||||
return 1;
|
||||
}
|
||||
__setup("ftrace=", set_ftrace);
|
||||
__setup("ftrace=", set_cmdline_ftrace);
|
||||
|
||||
static int __init set_ftrace_dump_on_oops(char *str)
|
||||
{
|
||||
|
@ -506,10 +506,6 @@ static inline int ftrace_graph_addr(unsigned long addr)
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
static inline int ftrace_trace_addr(unsigned long addr)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
static inline int ftrace_graph_addr(unsigned long addr)
|
||||
{
|
||||
return 1;
|
||||
@ -523,12 +519,12 @@ print_graph_function(struct trace_iterator *iter)
|
||||
}
|
||||
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
|
||||
|
||||
extern struct pid *ftrace_pid_trace;
|
||||
extern struct list_head ftrace_pids;
|
||||
|
||||
#ifdef CONFIG_FUNCTION_TRACER
|
||||
static inline int ftrace_trace_task(struct task_struct *task)
|
||||
{
|
||||
if (!ftrace_pid_trace)
|
||||
if (list_empty(&ftrace_pids))
|
||||
return 1;
|
||||
|
||||
return test_tsk_trace_trace(task);
|
||||
@ -710,7 +706,6 @@ struct event_filter {
|
||||
int n_preds;
|
||||
struct filter_pred **preds;
|
||||
char *filter_string;
|
||||
bool no_reset;
|
||||
};
|
||||
|
||||
struct event_subsystem {
|
||||
@ -722,22 +717,40 @@ struct event_subsystem {
|
||||
};
|
||||
|
||||
struct filter_pred;
|
||||
struct regex;
|
||||
|
||||
typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event,
|
||||
int val1, int val2);
|
||||
|
||||
struct filter_pred {
|
||||
filter_pred_fn_t fn;
|
||||
u64 val;
|
||||
char str_val[MAX_FILTER_STR_VAL];
|
||||
int str_len;
|
||||
char *field_name;
|
||||
int offset;
|
||||
int not;
|
||||
int op;
|
||||
int pop_n;
|
||||
typedef int (*regex_match_func)(char *str, struct regex *r, int len);
|
||||
|
||||
enum regex_type {
|
||||
MATCH_FULL = 0,
|
||||
MATCH_FRONT_ONLY,
|
||||
MATCH_MIDDLE_ONLY,
|
||||
MATCH_END_ONLY,
|
||||
};
|
||||
|
||||
struct regex {
|
||||
char pattern[MAX_FILTER_STR_VAL];
|
||||
int len;
|
||||
int field_len;
|
||||
regex_match_func match;
|
||||
};
|
||||
|
||||
struct filter_pred {
|
||||
filter_pred_fn_t fn;
|
||||
u64 val;
|
||||
struct regex regex;
|
||||
char *field_name;
|
||||
int offset;
|
||||
int not;
|
||||
int op;
|
||||
int pop_n;
|
||||
};
|
||||
|
||||
extern enum regex_type
|
||||
filter_parse_regex(char *buff, int len, char **search, int *not);
|
||||
extern void print_event_filter(struct ftrace_event_call *call,
|
||||
struct trace_seq *s);
|
||||
extern int apply_event_filter(struct ftrace_event_call *call,
|
||||
@ -753,7 +766,8 @@ filter_check_discard(struct ftrace_event_call *call, void *rec,
|
||||
struct ring_buffer *buffer,
|
||||
struct ring_buffer_event *event)
|
||||
{
|
||||
if (unlikely(call->filter_active) && !filter_match_preds(call, rec)) {
|
||||
if (unlikely(call->filter_active) &&
|
||||
!filter_match_preds(call->filter, rec)) {
|
||||
ring_buffer_discard_commit(buffer, event);
|
||||
return 1;
|
||||
}
|
||||
|
@ -503,7 +503,7 @@ extern char *__bad_type_size(void);
|
||||
#define FIELD(type, name) \
|
||||
sizeof(type) != sizeof(field.name) ? __bad_type_size() : \
|
||||
#type, "common_" #name, offsetof(typeof(field), name), \
|
||||
sizeof(field.name)
|
||||
sizeof(field.name), is_signed_type(type)
|
||||
|
||||
static int trace_write_header(struct trace_seq *s)
|
||||
{
|
||||
@ -511,17 +511,17 @@ static int trace_write_header(struct trace_seq *s)
|
||||
|
||||
/* struct trace_entry */
|
||||
return trace_seq_printf(s,
|
||||
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
|
||||
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
|
||||
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
|
||||
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
|
||||
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
|
||||
"\n",
|
||||
FIELD(unsigned short, type),
|
||||
FIELD(unsigned char, flags),
|
||||
FIELD(unsigned char, preempt_count),
|
||||
FIELD(int, pid),
|
||||
FIELD(int, lock_depth));
|
||||
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
|
||||
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
|
||||
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
|
||||
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
|
||||
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
|
||||
"\n",
|
||||
FIELD(unsigned short, type),
|
||||
FIELD(unsigned char, flags),
|
||||
FIELD(unsigned char, preempt_count),
|
||||
FIELD(int, pid),
|
||||
FIELD(int, lock_depth));
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
@ -874,9 +874,9 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
|
||||
"'%s/filter' entry\n", name);
|
||||
}
|
||||
|
||||
entry = trace_create_file("enable", 0644, system->entry,
|
||||
(void *)system->name,
|
||||
&ftrace_system_enable_fops);
|
||||
trace_create_file("enable", 0644, system->entry,
|
||||
(void *)system->name,
|
||||
&ftrace_system_enable_fops);
|
||||
|
||||
return system->entry;
|
||||
}
|
||||
@ -888,7 +888,6 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
|
||||
const struct file_operations *filter,
|
||||
const struct file_operations *format)
|
||||
{
|
||||
struct dentry *entry;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
@ -906,12 +905,12 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
|
||||
}
|
||||
|
||||
if (call->regfunc)
|
||||
entry = trace_create_file("enable", 0644, call->dir, call,
|
||||
enable);
|
||||
trace_create_file("enable", 0644, call->dir, call,
|
||||
enable);
|
||||
|
||||
if (call->id && call->profile_enable)
|
||||
entry = trace_create_file("id", 0444, call->dir, call,
|
||||
id);
|
||||
trace_create_file("id", 0444, call->dir, call,
|
||||
id);
|
||||
|
||||
if (call->define_fields) {
|
||||
ret = call->define_fields(call);
|
||||
@ -920,16 +919,16 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
|
||||
" events/%s\n", call->name);
|
||||
return ret;
|
||||
}
|
||||
entry = trace_create_file("filter", 0644, call->dir, call,
|
||||
filter);
|
||||
trace_create_file("filter", 0644, call->dir, call,
|
||||
filter);
|
||||
}
|
||||
|
||||
/* A trace may not want to export its format */
|
||||
if (!call->show_format)
|
||||
return 0;
|
||||
|
||||
entry = trace_create_file("format", 0444, call->dir, call,
|
||||
format);
|
||||
trace_create_file("format", 0444, call->dir, call,
|
||||
format);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -18,11 +18,10 @@
|
||||
* Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
|
||||
*/
|
||||
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/perf_event.h>
|
||||
|
||||
#include "trace.h"
|
||||
#include "trace_output.h"
|
||||
@ -31,6 +30,7 @@ enum filter_op_ids
|
||||
{
|
||||
OP_OR,
|
||||
OP_AND,
|
||||
OP_GLOB,
|
||||
OP_NE,
|
||||
OP_EQ,
|
||||
OP_LT,
|
||||
@ -48,16 +48,17 @@ struct filter_op {
|
||||
};
|
||||
|
||||
static struct filter_op filter_ops[] = {
|
||||
{ OP_OR, "||", 1 },
|
||||
{ OP_AND, "&&", 2 },
|
||||
{ OP_NE, "!=", 4 },
|
||||
{ OP_EQ, "==", 4 },
|
||||
{ OP_LT, "<", 5 },
|
||||
{ OP_LE, "<=", 5 },
|
||||
{ OP_GT, ">", 5 },
|
||||
{ OP_GE, ">=", 5 },
|
||||
{ OP_NONE, "OP_NONE", 0 },
|
||||
{ OP_OPEN_PAREN, "(", 0 },
|
||||
{ OP_OR, "||", 1 },
|
||||
{ OP_AND, "&&", 2 },
|
||||
{ OP_GLOB, "~", 4 },
|
||||
{ OP_NE, "!=", 4 },
|
||||
{ OP_EQ, "==", 4 },
|
||||
{ OP_LT, "<", 5 },
|
||||
{ OP_LE, "<=", 5 },
|
||||
{ OP_GT, ">", 5 },
|
||||
{ OP_GE, ">=", 5 },
|
||||
{ OP_NONE, "OP_NONE", 0 },
|
||||
{ OP_OPEN_PAREN, "(", 0 },
|
||||
};
|
||||
|
||||
enum {
|
||||
@ -197,9 +198,9 @@ static int filter_pred_string(struct filter_pred *pred, void *event,
|
||||
char *addr = (char *)(event + pred->offset);
|
||||
int cmp, match;
|
||||
|
||||
cmp = strncmp(addr, pred->str_val, pred->str_len);
|
||||
cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
|
||||
|
||||
match = (!cmp) ^ pred->not;
|
||||
match = cmp ^ pred->not;
|
||||
|
||||
return match;
|
||||
}
|
||||
@ -211,9 +212,9 @@ static int filter_pred_pchar(struct filter_pred *pred, void *event,
|
||||
char **addr = (char **)(event + pred->offset);
|
||||
int cmp, match;
|
||||
|
||||
cmp = strncmp(*addr, pred->str_val, pred->str_len);
|
||||
cmp = pred->regex.match(*addr, &pred->regex, pred->regex.field_len);
|
||||
|
||||
match = (!cmp) ^ pred->not;
|
||||
match = cmp ^ pred->not;
|
||||
|
||||
return match;
|
||||
}
|
||||
@ -237,9 +238,9 @@ static int filter_pred_strloc(struct filter_pred *pred, void *event,
|
||||
char *addr = (char *)(event + str_loc);
|
||||
int cmp, match;
|
||||
|
||||
cmp = strncmp(addr, pred->str_val, str_len);
|
||||
cmp = pred->regex.match(addr, &pred->regex, str_len);
|
||||
|
||||
match = (!cmp) ^ pred->not;
|
||||
match = cmp ^ pred->not;
|
||||
|
||||
return match;
|
||||
}
|
||||
@ -250,10 +251,121 @@ static int filter_pred_none(struct filter_pred *pred, void *event,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* return 1 if event matches, 0 otherwise (discard) */
int filter_match_preds(struct ftrace_event_call *call, void *rec)
/* Basic regex callbacks */
static int regex_match_full(char *str, struct regex *r, int len)
{
if (strncmp(str, r->pattern, len) == 0)
return 1;
return 0;
}

static int regex_match_front(char *str, struct regex *r, int len)
{
if (strncmp(str, r->pattern, len) == 0)
return 1;
return 0;
}

static int regex_match_middle(char *str, struct regex *r, int len)
{
if (strstr(str, r->pattern))
return 1;
return 0;
}

static int regex_match_end(char *str, struct regex *r, int len)
{
char *ptr = strstr(str, r->pattern);

if (ptr && (ptr[r->len] == 0))
return 1;
return 0;
}

/**
* filter_parse_regex - parse a basic regex
* @buff: the raw regex
* @len: length of the regex
* @search: will point to the beginning of the string to compare
* @not: tell whether the match will have to be inverted
*
* This passes in a buffer containing a regex and this function will
* set search to point to the search part of the buffer and
* return the type of search it is (see enum above).
* This does modify buff.
*
* Returns enum type.
* search returns the pointer to use for comparison.
* not returns 1 if buff started with a '!'
* 0 otherwise.
*/
enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
{
int type = MATCH_FULL;
int i;

if (buff[0] == '!') {
*not = 1;
buff++;
len--;
} else
*not = 0;

*search = buff;

for (i = 0; i < len; i++) {
if (buff[i] == '*') {
if (!i) {
*search = buff + 1;
type = MATCH_END_ONLY;
} else {
if (type == MATCH_END_ONLY)
type = MATCH_MIDDLE_ONLY;
else
type = MATCH_FRONT_ONLY;
buff[i] = 0;
break;
}
}
}

return type;
}
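/*
 * Illustration only, not part of this patch (the glob strings below are made
 * up): how a few buffers are classified by filter_parse_regex(). The buffer
 * is modified in place (a '*' past position 0 is overwritten with '\0'), so
 * callers pass a writable copy.
 *
 *   "sched*"   -> MATCH_FRONT_ONLY,  *search = "sched",   *not = 0
 *   "*switch"  -> MATCH_END_ONLY,    *search = "switch",  *not = 0
 *   "*idle*"   -> MATCH_MIDDLE_ONLY, *search = "idle",    *not = 0
 *   "!do_fork" -> MATCH_FULL,        *search = "do_fork", *not = 1
 */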

static void filter_build_regex(struct filter_pred *pred)
{
struct regex *r = &pred->regex;
char *search;
enum regex_type type = MATCH_FULL;
int not = 0;

if (pred->op == OP_GLOB) {
type = filter_parse_regex(r->pattern, r->len, &search, &not);
r->len = strlen(search);
memmove(r->pattern, search, r->len+1);
}

switch (type) {
case MATCH_FULL:
r->match = regex_match_full;
break;
case MATCH_FRONT_ONLY:
r->match = regex_match_front;
break;
case MATCH_MIDDLE_ONLY:
r->match = regex_match_middle;
break;
case MATCH_END_ONLY:
r->match = regex_match_end;
break;
}

pred->not ^= not;
}
|
||||
|
||||
/* return 1 if event matches, 0 otherwise (discard) */
|
||||
int filter_match_preds(struct event_filter *filter, void *rec)
|
||||
{
|
||||
struct event_filter *filter = call->filter;
|
||||
int match, top = 0, val1 = 0, val2 = 0;
|
||||
int stack[MAX_FILTER_PRED];
|
||||
struct filter_pred *pred;
|
||||
@ -396,7 +508,7 @@ static void filter_clear_pred(struct filter_pred *pred)
|
||||
{
|
||||
kfree(pred->field_name);
|
||||
pred->field_name = NULL;
|
||||
pred->str_len = 0;
|
||||
pred->regex.len = 0;
|
||||
}
|
||||
|
||||
static int filter_set_pred(struct filter_pred *dest,
|
||||
@ -426,9 +538,8 @@ static void filter_disable_preds(struct ftrace_event_call *call)
|
||||
filter->preds[i]->fn = filter_pred_none;
|
||||
}
|
||||
|
||||
void destroy_preds(struct ftrace_event_call *call)
|
||||
static void __free_preds(struct event_filter *filter)
|
||||
{
|
||||
struct event_filter *filter = call->filter;
|
||||
int i;
|
||||
|
||||
if (!filter)
|
||||
@ -441,21 +552,24 @@ void destroy_preds(struct ftrace_event_call *call)
|
||||
kfree(filter->preds);
|
||||
kfree(filter->filter_string);
|
||||
kfree(filter);
|
||||
call->filter = NULL;
|
||||
}
|
||||
|
||||
static int init_preds(struct ftrace_event_call *call)
|
||||
void destroy_preds(struct ftrace_event_call *call)
|
||||
{
|
||||
__free_preds(call->filter);
|
||||
call->filter = NULL;
|
||||
call->filter_active = 0;
|
||||
}
|
||||
|
||||
static struct event_filter *__alloc_preds(void)
|
||||
{
|
||||
struct event_filter *filter;
|
||||
struct filter_pred *pred;
|
||||
int i;
|
||||
|
||||
if (call->filter)
|
||||
return 0;
|
||||
|
||||
filter = call->filter = kzalloc(sizeof(*filter), GFP_KERNEL);
|
||||
if (!call->filter)
|
||||
return -ENOMEM;
|
||||
filter = kzalloc(sizeof(*filter), GFP_KERNEL);
|
||||
if (!filter)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
filter->n_preds = 0;
|
||||
|
||||
@ -471,12 +585,24 @@ static int init_preds(struct ftrace_event_call *call)
|
||||
filter->preds[i] = pred;
|
||||
}
|
||||
|
||||
return 0;
|
||||
return filter;
|
||||
|
||||
oom:
|
||||
destroy_preds(call);
|
||||
__free_preds(filter);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
return -ENOMEM;
|
||||
static int init_preds(struct ftrace_event_call *call)
|
||||
{
|
||||
if (call->filter)
|
||||
return 0;
|
||||
|
||||
call->filter_active = 0;
|
||||
call->filter = __alloc_preds();
|
||||
if (IS_ERR(call->filter))
|
||||
return PTR_ERR(call->filter);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int init_subsystem_preds(struct event_subsystem *system)
|
||||
@ -499,14 +625,7 @@ static int init_subsystem_preds(struct event_subsystem *system)
|
||||
return 0;
|
||||
}
|
||||
|
||||
enum {
|
||||
FILTER_DISABLE_ALL,
|
||||
FILTER_INIT_NO_RESET,
|
||||
FILTER_SKIP_NO_RESET,
|
||||
};
|
||||
|
||||
static void filter_free_subsystem_preds(struct event_subsystem *system,
|
||||
int flag)
|
||||
static void filter_free_subsystem_preds(struct event_subsystem *system)
|
||||
{
|
||||
struct ftrace_event_call *call;
|
||||
|
||||
@ -517,14 +636,6 @@ static void filter_free_subsystem_preds(struct event_subsystem *system,
|
||||
if (strcmp(call->system, system->name) != 0)
|
||||
continue;
|
||||
|
||||
if (flag == FILTER_INIT_NO_RESET) {
|
||||
call->filter->no_reset = false;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (flag == FILTER_SKIP_NO_RESET && call->filter->no_reset)
|
||||
continue;
|
||||
|
||||
filter_disable_preds(call);
|
||||
remove_filter_string(call->filter);
|
||||
}
|
||||
@ -532,10 +643,10 @@ static void filter_free_subsystem_preds(struct event_subsystem *system,
|
||||
|
||||
static int filter_add_pred_fn(struct filter_parse_state *ps,
|
||||
struct ftrace_event_call *call,
|
||||
struct event_filter *filter,
|
||||
struct filter_pred *pred,
|
||||
filter_pred_fn_t fn)
|
||||
{
|
||||
struct event_filter *filter = call->filter;
|
||||
int idx, err;
|
||||
|
||||
if (filter->n_preds == MAX_FILTER_PRED) {
|
||||
@ -550,7 +661,6 @@ static int filter_add_pred_fn(struct filter_parse_state *ps,
|
||||
return err;
|
||||
|
||||
filter->n_preds++;
|
||||
call->filter_active = 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -575,7 +685,10 @@ static bool is_string_field(struct ftrace_event_field *field)
|
||||
|
||||
static int is_legal_op(struct ftrace_event_field *field, int op)
|
||||
{
|
||||
if (is_string_field(field) && (op != OP_EQ && op != OP_NE))
|
||||
if (is_string_field(field) &&
|
||||
(op != OP_EQ && op != OP_NE && op != OP_GLOB))
|
||||
return 0;
|
||||
if (!is_string_field(field) && op == OP_GLOB)
|
||||
return 0;
|
||||
|
||||
return 1;
|
||||
@ -626,6 +739,7 @@ static filter_pred_fn_t select_comparison_fn(int op, int field_size,
|
||||
|
||||
static int filter_add_pred(struct filter_parse_state *ps,
|
||||
struct ftrace_event_call *call,
|
||||
struct event_filter *filter,
|
||||
struct filter_pred *pred,
|
||||
bool dry_run)
|
||||
{
|
||||
@ -660,21 +774,22 @@ static int filter_add_pred(struct filter_parse_state *ps,
|
||||
}
|
||||
|
||||
if (is_string_field(field)) {
|
||||
pred->str_len = field->size;
|
||||
filter_build_regex(pred);
|
||||
|
||||
if (field->filter_type == FILTER_STATIC_STRING)
|
||||
if (field->filter_type == FILTER_STATIC_STRING) {
|
||||
fn = filter_pred_string;
|
||||
else if (field->filter_type == FILTER_DYN_STRING)
|
||||
pred->regex.field_len = field->size;
|
||||
} else if (field->filter_type == FILTER_DYN_STRING)
|
||||
fn = filter_pred_strloc;
|
||||
else {
|
||||
fn = filter_pred_pchar;
|
||||
pred->str_len = strlen(pred->str_val);
|
||||
pred->regex.field_len = strlen(pred->regex.pattern);
|
||||
}
|
||||
} else {
|
||||
if (field->is_signed)
|
||||
ret = strict_strtoll(pred->str_val, 0, &val);
|
||||
ret = strict_strtoll(pred->regex.pattern, 0, &val);
|
||||
else
|
||||
ret = strict_strtoull(pred->str_val, 0, &val);
|
||||
ret = strict_strtoull(pred->regex.pattern, 0, &val);
|
||||
if (ret) {
|
||||
parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
|
||||
return -EINVAL;
|
||||
@ -694,45 +809,7 @@ static int filter_add_pred(struct filter_parse_state *ps,
|
||||
|
||||
add_pred_fn:
|
||||
if (!dry_run)
|
||||
return filter_add_pred_fn(ps, call, pred, fn);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int filter_add_subsystem_pred(struct filter_parse_state *ps,
|
||||
struct event_subsystem *system,
|
||||
struct filter_pred *pred,
|
||||
char *filter_string,
|
||||
bool dry_run)
|
||||
{
|
||||
struct ftrace_event_call *call;
|
||||
int err = 0;
|
||||
bool fail = true;
|
||||
|
||||
list_for_each_entry(call, &ftrace_events, list) {
|
||||
|
||||
if (!call->define_fields)
|
||||
continue;
|
||||
|
||||
if (strcmp(call->system, system->name))
|
||||
continue;
|
||||
|
||||
if (call->filter->no_reset)
|
||||
continue;
|
||||
|
||||
err = filter_add_pred(ps, call, pred, dry_run);
|
||||
if (err)
|
||||
call->filter->no_reset = true;
|
||||
else
|
||||
fail = false;
|
||||
|
||||
if (!dry_run)
|
||||
replace_filter_string(call->filter, filter_string);
|
||||
}
|
||||
|
||||
if (fail) {
|
||||
parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
|
||||
return err;
|
||||
}
|
||||
return filter_add_pred_fn(ps, call, filter, pred, fn);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1045,8 +1122,8 @@ static struct filter_pred *create_pred(int op, char *operand1, char *operand2)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
strcpy(pred->str_val, operand2);
|
||||
pred->str_len = strlen(operand2);
|
||||
strcpy(pred->regex.pattern, operand2);
|
||||
pred->regex.len = strlen(pred->regex.pattern);
|
||||
|
||||
pred->op = op;
|
||||
|
||||
@ -1090,8 +1167,8 @@ static int check_preds(struct filter_parse_state *ps)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int replace_preds(struct event_subsystem *system,
|
||||
struct ftrace_event_call *call,
|
||||
static int replace_preds(struct ftrace_event_call *call,
|
||||
struct event_filter *filter,
|
||||
struct filter_parse_state *ps,
|
||||
char *filter_string,
|
||||
bool dry_run)
|
||||
@ -1138,11 +1215,7 @@ static int replace_preds(struct event_subsystem *system,
|
||||
add_pred:
|
||||
if (!pred)
|
||||
return -ENOMEM;
|
||||
if (call)
|
||||
err = filter_add_pred(ps, call, pred, false);
|
||||
else
|
||||
err = filter_add_subsystem_pred(ps, system, pred,
|
||||
filter_string, dry_run);
|
||||
err = filter_add_pred(ps, call, filter, pred, dry_run);
|
||||
filter_free_pred(pred);
|
||||
if (err)
|
||||
return err;
|
||||
@ -1153,10 +1226,50 @@ add_pred:
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int replace_system_preds(struct event_subsystem *system,
|
||||
struct filter_parse_state *ps,
|
||||
char *filter_string)
|
||||
{
|
||||
struct event_filter *filter = system->filter;
|
||||
struct ftrace_event_call *call;
|
||||
bool fail = true;
|
||||
int err;
|
||||
|
||||
list_for_each_entry(call, &ftrace_events, list) {
|
||||
|
||||
if (!call->define_fields)
|
||||
continue;
|
||||
|
||||
if (strcmp(call->system, system->name) != 0)
|
||||
continue;
|
||||
|
||||
/* try to see if the filter can be applied */
|
||||
err = replace_preds(call, filter, ps, filter_string, true);
|
||||
if (err)
|
||||
continue;
|
||||
|
||||
/* really apply the filter */
|
||||
filter_disable_preds(call);
|
||||
err = replace_preds(call, filter, ps, filter_string, false);
|
||||
if (err)
|
||||
filter_disable_preds(call);
|
||||
else {
|
||||
call->filter_active = 1;
|
||||
replace_filter_string(filter, filter_string);
|
||||
}
|
||||
fail = false;
|
||||
}
|
||||
|
||||
if (fail) {
|
||||
parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
|
||||
{
|
||||
int err;
|
||||
|
||||
struct filter_parse_state *ps;
|
||||
|
||||
mutex_lock(&event_mutex);
|
||||
@ -1168,8 +1281,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
|
||||
if (!strcmp(strstrip(filter_string), "0")) {
|
||||
filter_disable_preds(call);
|
||||
remove_filter_string(call->filter);
|
||||
mutex_unlock(&event_mutex);
|
||||
return 0;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
err = -ENOMEM;
|
||||
@ -1187,10 +1299,11 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
|
||||
goto out;
|
||||
}
|
||||
|
||||
err = replace_preds(NULL, call, ps, filter_string, false);
|
||||
err = replace_preds(call, call->filter, ps, filter_string, false);
|
||||
if (err)
|
||||
append_filter_err(ps, call->filter);
|
||||
|
||||
else
|
||||
call->filter_active = 1;
|
||||
out:
|
||||
filter_opstack_clear(ps);
|
||||
postfix_clear(ps);
|
||||
@ -1205,7 +1318,6 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
|
||||
char *filter_string)
|
||||
{
|
||||
int err;
|
||||
|
||||
struct filter_parse_state *ps;
|
||||
|
||||
mutex_lock(&event_mutex);
|
||||
@ -1215,10 +1327,9 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
|
||||
goto out_unlock;
|
||||
|
||||
if (!strcmp(strstrip(filter_string), "0")) {
|
||||
filter_free_subsystem_preds(system, FILTER_DISABLE_ALL);
|
||||
filter_free_subsystem_preds(system);
|
||||
remove_filter_string(system->filter);
|
||||
mutex_unlock(&event_mutex);
|
||||
return 0;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
err = -ENOMEM;
|
||||
@ -1235,23 +1346,9 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
|
||||
goto out;
|
||||
}
|
||||
|
||||
filter_free_subsystem_preds(system, FILTER_INIT_NO_RESET);
|
||||
|
||||
/* try to see the filter can be applied to which events */
|
||||
err = replace_preds(system, NULL, ps, filter_string, true);
|
||||
if (err) {
|
||||
err = replace_system_preds(system, ps, filter_string);
|
||||
if (err)
|
||||
append_filter_err(ps, system->filter);
|
||||
goto out;
|
||||
}
|
||||
|
||||
filter_free_subsystem_preds(system, FILTER_SKIP_NO_RESET);
|
||||
|
||||
/* really apply the filter to the events */
|
||||
err = replace_preds(system, NULL, ps, filter_string, false);
|
||||
if (err) {
|
||||
append_filter_err(ps, system->filter);
|
||||
filter_free_subsystem_preds(system, 2);
|
||||
}
|
||||
|
||||
out:
|
||||
filter_opstack_clear(ps);
|
||||
@ -1263,3 +1360,73 @@ out_unlock:
|
||||
return err;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_EVENT_PROFILE
|
||||
|
||||
void ftrace_profile_free_filter(struct perf_event *event)
|
||||
{
|
||||
struct event_filter *filter = event->filter;
|
||||
|
||||
event->filter = NULL;
|
||||
__free_preds(filter);
|
||||
}
|
||||
|
||||
int ftrace_profile_set_filter(struct perf_event *event, int event_id,
|
||||
char *filter_str)
|
||||
{
|
||||
int err;
|
||||
struct event_filter *filter;
|
||||
struct filter_parse_state *ps;
|
||||
struct ftrace_event_call *call = NULL;
|
||||
|
||||
mutex_lock(&event_mutex);
|
||||
|
||||
list_for_each_entry(call, &ftrace_events, list) {
|
||||
if (call->id == event_id)
|
||||
break;
|
||||
}
|
||||
|
||||
err = -EINVAL;
|
||||
if (!call)
|
||||
goto out_unlock;
|
||||
|
||||
err = -EEXIST;
|
||||
if (event->filter)
|
||||
goto out_unlock;
|
||||
|
||||
filter = __alloc_preds();
|
||||
if (IS_ERR(filter)) {
|
||||
err = PTR_ERR(filter);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
err = -ENOMEM;
|
||||
ps = kzalloc(sizeof(*ps), GFP_KERNEL);
|
||||
if (!ps)
|
||||
goto free_preds;
|
||||
|
||||
parse_init(ps, filter_ops, filter_str);
|
||||
err = filter_parse(ps);
|
||||
if (err)
|
||||
goto free_ps;
|
||||
|
||||
err = replace_preds(call, filter, ps, filter_str, false);
|
||||
if (!err)
|
||||
event->filter = filter;
|
||||
|
||||
free_ps:
|
||||
filter_opstack_clear(ps);
|
||||
postfix_clear(ps);
|
||||
kfree(ps);
|
||||
|
||||
free_preds:
|
||||
if (err)
|
||||
__free_preds(filter);
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&event_mutex);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_EVENT_PROFILE */
|
||||
|
||||
|
@ -66,44 +66,47 @@ static void __used ____ftrace_check_##name(void) \
|
||||
#undef __field
|
||||
#define __field(type, item) \
|
||||
ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
|
||||
"offset:%zu;\tsize:%zu;\n", \
|
||||
"offset:%zu;\tsize:%zu;\tsigned:%u;\n", \
|
||||
offsetof(typeof(field), item), \
|
||||
sizeof(field.item)); \
|
||||
sizeof(field.item), is_signed_type(type)); \
|
||||
if (!ret) \
|
||||
return 0;
|
||||
|
||||
#undef __field_desc
|
||||
#define __field_desc(type, container, item) \
|
||||
ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
|
||||
"offset:%zu;\tsize:%zu;\n", \
|
||||
"offset:%zu;\tsize:%zu;\tsigned:%u;\n", \
|
||||
offsetof(typeof(field), container.item), \
|
||||
sizeof(field.container.item)); \
|
||||
sizeof(field.container.item), \
|
||||
is_signed_type(type)); \
|
||||
if (!ret) \
|
||||
return 0;
|
||||
|
||||
#undef __array
|
||||
#define __array(type, item, len) \
|
||||
ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
|
||||
"offset:%zu;\tsize:%zu;\n", \
|
||||
offsetof(typeof(field), item), \
|
||||
sizeof(field.item)); \
|
||||
"offset:%zu;\tsize:%zu;\tsigned:%u;\n", \
|
||||
offsetof(typeof(field), item), \
|
||||
sizeof(field.item), is_signed_type(type)); \
|
||||
if (!ret) \
|
||||
return 0;
|
||||
|
||||
#undef __array_desc
|
||||
#define __array_desc(type, container, item, len) \
|
||||
ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
|
||||
"offset:%zu;\tsize:%zu;\n", \
|
||||
"offset:%zu;\tsize:%zu;\tsigned:%u;\n", \
|
||||
offsetof(typeof(field), container.item), \
|
||||
sizeof(field.container.item)); \
|
||||
sizeof(field.container.item), \
|
||||
is_signed_type(type)); \
|
||||
if (!ret) \
|
||||
return 0;
|
||||
|
||||
#undef __dynamic_array
|
||||
#define __dynamic_array(type, item) \
|
||||
ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
|
||||
"offset:%zu;\tsize:0;\n", \
|
||||
offsetof(typeof(field), item)); \
|
||||
"offset:%zu;\tsize:0;\tsigned:%u;\n", \
|
||||
offsetof(typeof(field), item), \
|
||||
is_signed_type(type)); \
|
||||
if (!ret) \
|
||||
return 0;
|
||||
|
||||
|
@ -14,6 +14,69 @@ static int sys_refcount_exit;
|
||||
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
|
||||
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
|
||||
|
||||
extern unsigned long __start_syscalls_metadata[];
|
||||
extern unsigned long __stop_syscalls_metadata[];
|
||||
|
||||
static struct syscall_metadata **syscalls_metadata;
|
||||
|
||||
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
struct syscall_metadata *start;
struct syscall_metadata *stop;
char str[KSYM_SYMBOL_LEN];

start = (struct syscall_metadata *)__start_syscalls_metadata;
stop = (struct syscall_metadata *)__stop_syscalls_metadata;
kallsyms_lookup(syscall, NULL, NULL, NULL, str);

for ( ; start < stop; start++) {
/*
* Only compare after the "sys" prefix. Archs that use
* syscall wrappers may have syscall symbol aliases prefixed
* with "SyS" instead of "sys", leading to an unwanted
* mismatch.
*/
if (start->name && !strcmp(start->name + 3, str + 3))
return start;
}
return NULL;
}
|
||||
|
||||
static struct syscall_metadata *syscall_nr_to_meta(int nr)
|
||||
{
|
||||
if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
|
||||
return NULL;
|
||||
|
||||
return syscalls_metadata[nr];
|
||||
}
|
||||
|
||||
int syscall_name_to_nr(char *name)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!syscalls_metadata)
|
||||
return -1;
|
||||
|
||||
for (i = 0; i < NR_syscalls; i++) {
|
||||
if (syscalls_metadata[i]) {
|
||||
if (!strcmp(syscalls_metadata[i]->name, name))
|
||||
return i;
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
void set_syscall_enter_id(int num, int id)
|
||||
{
|
||||
syscalls_metadata[num]->enter_id = id;
|
||||
}
|
||||
|
||||
void set_syscall_exit_id(int num, int id)
|
||||
{
|
||||
syscalls_metadata[num]->exit_id = id;
|
||||
}
|
||||
|
||||
enum print_line_t
|
||||
print_syscall_enter(struct trace_iterator *iter, int flags)
|
||||
{
|
||||
@ -103,7 +166,8 @@ extern char *__bad_type_size(void);
|
||||
#define SYSCALL_FIELD(type, name) \
|
||||
sizeof(type) != sizeof(trace.name) ? \
|
||||
__bad_type_size() : \
|
||||
#type, #name, offsetof(typeof(trace), name), sizeof(trace.name)
|
||||
#type, #name, offsetof(typeof(trace), name), \
|
||||
sizeof(trace.name), is_signed_type(type)
|
||||
|
||||
int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
|
||||
{
|
||||
@ -120,7 +184,8 @@ int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
|
||||
if (!entry)
|
||||
return 0;
|
||||
|
||||
ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
|
||||
ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
|
||||
"\tsigned:%u;\n",
|
||||
SYSCALL_FIELD(int, nr));
|
||||
if (!ret)
|
||||
return 0;
|
||||
@ -130,8 +195,10 @@ int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
|
||||
entry->args[i]);
|
||||
if (!ret)
|
||||
return 0;
|
||||
ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;\n", offset,
|
||||
sizeof(unsigned long));
|
||||
ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;"
|
||||
"\tsigned:%u;\n", offset,
|
||||
sizeof(unsigned long),
|
||||
is_signed_type(unsigned long));
|
||||
if (!ret)
|
||||
return 0;
|
||||
offset += sizeof(unsigned long);
|
||||
@ -163,8 +230,10 @@ int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
|
||||
struct syscall_trace_exit trace;
|
||||
|
||||
ret = trace_seq_printf(s,
|
||||
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
|
||||
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
|
||||
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
|
||||
"\tsigned:%u;\n"
|
||||
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
|
||||
"\tsigned:%u;\n",
|
||||
SYSCALL_FIELD(int, nr),
|
||||
SYSCALL_FIELD(long, ret));
|
||||
if (!ret)
|
||||
@ -212,7 +281,7 @@ int syscall_exit_define_fields(struct ftrace_event_call *call)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = trace_define_field(call, SYSCALL_FIELD(long, ret), 0,
|
||||
ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
|
||||
FILTER_OTHER);
|
||||
|
||||
return ret;
|
||||
@ -375,6 +444,29 @@ struct trace_event event_syscall_exit = {
|
||||
.trace = print_syscall_exit,
|
||||
};
|
||||
|
||||
int __init init_ftrace_syscalls(void)
|
||||
{
|
||||
struct syscall_metadata *meta;
|
||||
unsigned long addr;
|
||||
int i;
|
||||
|
||||
syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
|
||||
NR_syscalls, GFP_KERNEL);
|
||||
if (!syscalls_metadata) {
|
||||
WARN_ON(1);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
for (i = 0; i < NR_syscalls; i++) {
|
||||
addr = arch_syscall_addr(i);
|
||||
meta = find_syscall_meta(addr);
|
||||
syscalls_metadata[i] = meta;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
core_initcall(init_ftrace_syscalls);
|
||||
|
||||
#ifdef CONFIG_EVENT_PROFILE
|
||||
|
||||
static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
|
||||
|
@ -5,10 +5,13 @@
|
||||
* relegated to obsolescence, but used by various less
|
||||
* important (or lazy) subsystems.
|
||||
*/
|
||||
#include <linux/smp_lock.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/kallsyms.h>
|
||||
#include <linux/semaphore.h>
|
||||
#include <linux/smp_lock.h>
|
||||
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include <trace/events/bkl.h>
|
||||
|
||||
/*
|
||||
* The 'big kernel lock'
|
||||
@ -113,21 +116,26 @@ static inline void __unlock_kernel(void)
|
||||
* This cannot happen asynchronously, so we only need to
|
||||
* worry about other CPU's.
|
||||
*/
|
||||
void __lockfunc lock_kernel(void)
|
||||
void __lockfunc _lock_kernel(const char *func, const char *file, int line)
|
||||
{
|
||||
int depth = current->lock_depth+1;
|
||||
int depth = current->lock_depth + 1;
|
||||
|
||||
trace_lock_kernel(func, file, line);
|
||||
|
||||
if (likely(!depth))
|
||||
__lock_kernel();
|
||||
current->lock_depth = depth;
|
||||
}
|
||||
|
||||
void __lockfunc unlock_kernel(void)
|
||||
void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
|
||||
{
|
||||
BUG_ON(current->lock_depth < 0);
|
||||
if (likely(--current->lock_depth < 0))
|
||||
__unlock_kernel();
|
||||
|
||||
trace_unlock_kernel(func, file, line);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(lock_kernel);
|
||||
EXPORT_SYMBOL(unlock_kernel);
|
||||
EXPORT_SYMBOL(_lock_kernel);
|
||||
EXPORT_SYMBOL(_unlock_kernel);
|
||||
|
||||
|
@ -119,6 +119,7 @@ my %text_sections = (
|
||||
".sched.text" => 1,
|
||||
".spinlock.text" => 1,
|
||||
".irqentry.text" => 1,
|
||||
".text.unlikely" => 1,
|
||||
);
|
||||
|
||||
$objdump = "objdump" if ((length $objdump) == 0);
|
||||
|
@ -31,9 +31,12 @@ OPTIONS
-w::
--width=::
Select the width of the SVG file (default: 1000)
-p::
-P::
--power-only::
Only output the CPU power section of the diagram
-p::
--process::
Select the processes to display, by name or PID

SEE ALSO
@ -201,7 +201,14 @@ EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wold-style-definition
|
||||
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-prototypes
|
||||
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wdeclaration-after-statement
|
||||
|
||||
CFLAGS = $(MBITS) -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -fstack-protector-all -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS)
|
||||
ifeq ("$(origin DEBUG)", "command line")
|
||||
PERF_DEBUG = $(DEBUG)
|
||||
endif
|
||||
ifndef PERF_DEBUG
|
||||
CFLAGS_OPTIMIZE = -O6
|
||||
endif
|
||||
|
||||
CFLAGS = $(MBITS) -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -fstack-protector-all -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS)
|
||||
LDFLAGS = -lpthread -lrt -lelf -lm
|
||||
ALL_CFLAGS = $(CFLAGS)
|
||||
ALL_LDFLAGS = $(LDFLAGS)
|
||||
@ -329,8 +336,26 @@ LIB_H += ../../include/linux/perf_event.h
|
||||
LIB_H += ../../include/linux/rbtree.h
|
||||
LIB_H += ../../include/linux/list.h
|
||||
LIB_H += ../../include/linux/stringify.h
|
||||
LIB_H += util/include/linux/bitmap.h
|
||||
LIB_H += util/include/linux/bitops.h
|
||||
LIB_H += util/include/linux/compiler.h
|
||||
LIB_H += util/include/linux/ctype.h
|
||||
LIB_H += util/include/linux/kernel.h
|
||||
LIB_H += util/include/linux/list.h
|
||||
LIB_H += util/include/linux/module.h
|
||||
LIB_H += util/include/linux/poison.h
|
||||
LIB_H += util/include/linux/prefetch.h
|
||||
LIB_H += util/include/linux/rbtree.h
|
||||
LIB_H += util/include/linux/string.h
|
||||
LIB_H += util/include/linux/types.h
|
||||
LIB_H += util/include/asm/asm-offsets.h
|
||||
LIB_H += util/include/asm/bitops.h
|
||||
LIB_H += util/include/asm/byteorder.h
|
||||
LIB_H += util/include/asm/swab.h
|
||||
LIB_H += util/include/asm/system.h
|
||||
LIB_H += util/include/asm/uaccess.h
|
||||
LIB_H += perf.h
|
||||
LIB_H += util/event.h
|
||||
LIB_H += util/types.h
|
||||
LIB_H += util/levenshtein.h
|
||||
LIB_H += util/parse-options.h
|
||||
@ -344,9 +369,12 @@ LIB_H += util/strlist.h
|
||||
LIB_H += util/run-command.h
|
||||
LIB_H += util/sigchain.h
|
||||
LIB_H += util/symbol.h
|
||||
LIB_H += util/module.h
|
||||
LIB_H += util/color.h
|
||||
LIB_H += util/values.h
|
||||
LIB_H += util/sort.h
|
||||
LIB_H += util/hist.h
|
||||
LIB_H += util/thread.h
|
||||
LIB_H += util/data_map.h
|
||||
|
||||
LIB_OBJS += util/abspath.o
|
||||
LIB_OBJS += util/alias.o
|
||||
@ -360,6 +388,9 @@ LIB_OBJS += util/parse-options.o
|
||||
LIB_OBJS += util/parse-events.o
|
||||
LIB_OBJS += util/path.o
|
||||
LIB_OBJS += util/rbtree.o
|
||||
LIB_OBJS += util/bitmap.o
|
||||
LIB_OBJS += util/hweight.o
|
||||
LIB_OBJS += util/find_next_bit.o
|
||||
LIB_OBJS += util/run-command.o
|
||||
LIB_OBJS += util/quote.o
|
||||
LIB_OBJS += util/strbuf.o
|
||||
@ -369,7 +400,6 @@ LIB_OBJS += util/usage.o
|
||||
LIB_OBJS += util/wrapper.o
|
||||
LIB_OBJS += util/sigchain.o
|
||||
LIB_OBJS += util/symbol.o
|
||||
LIB_OBJS += util/module.o
|
||||
LIB_OBJS += util/color.o
|
||||
LIB_OBJS += util/pager.o
|
||||
LIB_OBJS += util/header.o
|
||||
@ -382,6 +412,9 @@ LIB_OBJS += util/trace-event-parse.o
|
||||
LIB_OBJS += util/trace-event-read.o
|
||||
LIB_OBJS += util/trace-event-info.o
|
||||
LIB_OBJS += util/svghelper.o
|
||||
LIB_OBJS += util/sort.o
|
||||
LIB_OBJS += util/hist.o
|
||||
LIB_OBJS += util/data_map.o
|
||||
|
||||
BUILTIN_OBJS += builtin-annotate.o
|
||||
BUILTIN_OBJS += builtin-help.o
|
||||
@ -424,8 +457,12 @@ ifeq ($(uname_S),Darwin)
|
||||
PTHREAD_LIBS =
|
||||
endif
|
||||
|
||||
ifneq ($(shell sh -c "(echo '\#include <gnu/libc-version.h>'; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y)
|
||||
msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]);
|
||||
endif
|
||||
|
||||
ifneq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y)
|
||||
msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]);
|
||||
msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel);
|
||||
endif
|
||||
|
||||
ifneq ($(shell sh -c "(echo '\#include <libdwarf/dwarf.h>'; echo '\#include <libdwarf/libdwarf.h>'; echo 'int main(void) { Dwarf_Debug dbg; Dwarf_Error err; Dwarf_Ranges *rng; dwarf_init(0, DW_DLC_READ, 0, 0, &dbg, &err); dwarf_get_ranges(dbg, 0, &rng, 0, 0, &err); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -ldwarf -lelf -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y)
|
||||
@ -795,6 +832,19 @@ util/config.o: util/config.c PERF-CFLAGS
|
||||
util/rbtree.o: ../../lib/rbtree.c PERF-CFLAGS
|
||||
$(QUIET_CC)$(CC) -o util/rbtree.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
|
||||
|
||||
# some perf warning policies can't fit to lib/bitmap.c, eg: it warns about variable shadowing
|
||||
# from <string.h> that comes from kernel headers wrapping.
|
||||
KBITMAP_FLAGS=`echo $(ALL_CFLAGS) | sed s/-Wshadow// | sed s/-Wswitch-default// | sed s/-Wextra//`
|
||||
|
||||
util/bitmap.o: ../../lib/bitmap.c PERF-CFLAGS
|
||||
$(QUIET_CC)$(CC) -o util/bitmap.o -c $(KBITMAP_FLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
|
||||
|
||||
util/hweight.o: ../../lib/hweight.c PERF-CFLAGS
|
||||
$(QUIET_CC)$(CC) -o util/hweight.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
|
||||
|
||||
util/find_next_bit.o: ../../lib/find_next_bit.c PERF-CFLAGS
|
||||
$(QUIET_CC)$(CC) -o util/find_next_bit.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
|
||||
|
||||
perf-%$X: %.o $(PERFLIBS)
|
||||
$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS)
|
||||
|
||||
|
@ -22,15 +22,13 @@
|
||||
#include "util/parse-options.h"
|
||||
#include "util/parse-events.h"
|
||||
#include "util/thread.h"
|
||||
#include "util/sort.h"
|
||||
#include "util/hist.h"
|
||||
|
||||
static char const *input_name = "perf.data";
|
||||
|
||||
static char default_sort_order[] = "comm,symbol";
|
||||
static char *sort_order = default_sort_order;
|
||||
|
||||
static int force;
|
||||
static int input;
|
||||
static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;
|
||||
|
||||
static int full_paths;
|
||||
|
||||
@ -39,9 +37,10 @@ static int print_line;
|
||||
static unsigned long page_size;
|
||||
static unsigned long mmap_window = 32;
|
||||
|
||||
static struct rb_root threads;
|
||||
static struct thread *last_match;
|
||||
|
||||
struct sym_hist {
|
||||
u64 sum;
|
||||
u64 ip[0];
|
||||
};
|
||||
|
||||
struct sym_ext {
|
||||
struct rb_node node;
|
||||
@ -49,247 +48,33 @@ struct sym_ext {
|
||||
char *path;
|
||||
};
|
||||
|
||||
/*
|
||||
* histogram, sorted on item, collects counts
|
||||
*/
|
||||
|
||||
static struct rb_root hist;
|
||||
|
||||
struct hist_entry {
|
||||
struct rb_node rb_node;
|
||||
|
||||
struct thread *thread;
|
||||
struct map *map;
|
||||
struct dso *dso;
|
||||
struct symbol *sym;
|
||||
u64 ip;
|
||||
char level;
|
||||
|
||||
uint32_t count;
|
||||
struct sym_priv {
|
||||
struct sym_hist *hist;
|
||||
struct sym_ext *ext;
|
||||
};
|
||||
|
||||
/*
|
||||
* configurable sorting bits
|
||||
*/
|
||||
static const char *sym_hist_filter;
|
||||
|
||||
struct sort_entry {
|
||||
struct list_head list;
|
||||
|
||||
const char *header;
|
||||
|
||||
int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
|
||||
int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
|
||||
size_t (*print)(FILE *fp, struct hist_entry *);
|
||||
};
|
||||
|
||||
/* --sort pid */
|
||||
|
||||
static int64_t
|
||||
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
|
||||
static int symbol_filter(struct map *map, struct symbol *sym)
|
||||
{
|
||||
return right->thread->pid - left->thread->pid;
|
||||
}
|
||||
|
||||
static size_t
|
||||
sort__thread_print(FILE *fp, struct hist_entry *self)
|
||||
{
|
||||
return fprintf(fp, "%16s:%5d", self->thread->comm ?: "", self->thread->pid);
|
||||
}
|
||||
|
||||
static struct sort_entry sort_thread = {
|
||||
.header = " Command: Pid",
|
||||
.cmp = sort__thread_cmp,
|
||||
.print = sort__thread_print,
|
||||
};
|
||||
|
||||
/* --sort comm */
|
||||
|
||||
static int64_t
|
||||
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
|
||||
{
|
||||
return right->thread->pid - left->thread->pid;
|
||||
}
|
||||
|
||||
static int64_t
|
||||
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
|
||||
{
|
||||
char *comm_l = left->thread->comm;
|
||||
char *comm_r = right->thread->comm;
|
||||
|
||||
if (!comm_l || !comm_r) {
|
||||
if (!comm_l && !comm_r)
|
||||
return 0;
|
||||
else if (!comm_l)
|
||||
return -1;
|
||||
else
|
||||
return 1;
|
||||
}
|
||||
|
||||
return strcmp(comm_l, comm_r);
|
||||
}
|
||||
|
||||
static size_t
|
||||
sort__comm_print(FILE *fp, struct hist_entry *self)
|
||||
{
|
||||
return fprintf(fp, "%16s", self->thread->comm);
|
||||
}
|
||||
|
||||
static struct sort_entry sort_comm = {
|
||||
.header = " Command",
|
||||
.cmp = sort__comm_cmp,
|
||||
.collapse = sort__comm_collapse,
|
||||
.print = sort__comm_print,
|
||||
};
|
||||
|
||||
/* --sort dso */
|
||||
|
||||
static int64_t
|
||||
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
|
||||
{
|
||||
struct dso *dso_l = left->dso;
|
||||
struct dso *dso_r = right->dso;
|
||||
|
||||
if (!dso_l || !dso_r) {
|
||||
if (!dso_l && !dso_r)
|
||||
return 0;
|
||||
else if (!dso_l)
|
||||
return -1;
|
||||
else
|
||||
return 1;
|
||||
}
|
||||
|
||||
return strcmp(dso_l->name, dso_r->name);
|
||||
}
|
||||
|
||||
static size_t
|
||||
sort__dso_print(FILE *fp, struct hist_entry *self)
|
||||
{
|
||||
if (self->dso)
|
||||
return fprintf(fp, "%-25s", self->dso->name);
|
||||
|
||||
return fprintf(fp, "%016llx ", (u64)self->ip);
|
||||
}
|
||||
|
||||
static struct sort_entry sort_dso = {
|
||||
.header = "Shared Object ",
|
||||
.cmp = sort__dso_cmp,
|
||||
.print = sort__dso_print,
|
||||
};
|
||||
|
||||
/* --sort symbol */
|
||||
|
||||
static int64_t
|
||||
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
|
||||
{
|
||||
u64 ip_l, ip_r;
|
||||
|
||||
if (left->sym == right->sym)
|
||||
return 0;
|
||||
|
||||
ip_l = left->sym ? left->sym->start : left->ip;
|
||||
ip_r = right->sym ? right->sym->start : right->ip;
|
||||
|
||||
return (int64_t)(ip_r - ip_l);
|
||||
}
|
||||
|
||||
static size_t
|
||||
sort__sym_print(FILE *fp, struct hist_entry *self)
|
||||
{
|
||||
size_t ret = 0;
|
||||
|
||||
if (verbose)
|
||||
ret += fprintf(fp, "%#018llx ", (u64)self->ip);
|
||||
|
||||
if (self->sym) {
|
||||
ret += fprintf(fp, "[%c] %s",
|
||||
self->dso == kernel_dso ? 'k' : '.', self->sym->name);
|
||||
} else {
|
||||
ret += fprintf(fp, "%#016llx", (u64)self->ip);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct sort_entry sort_sym = {
|
||||
.header = "Symbol",
|
||||
.cmp = sort__sym_cmp,
|
||||
.print = sort__sym_print,
|
||||
};
|
||||
|
||||
static int sort__need_collapse = 0;
|
||||
|
||||
struct sort_dimension {
|
||||
const char *name;
|
||||
struct sort_entry *entry;
|
||||
int taken;
|
||||
};
|
||||
|
||||
static struct sort_dimension sort_dimensions[] = {
|
||||
{ .name = "pid", .entry = &sort_thread, },
|
||||
{ .name = "comm", .entry = &sort_comm, },
|
||||
{ .name = "dso", .entry = &sort_dso, },
|
||||
{ .name = "symbol", .entry = &sort_sym, },
|
||||
};

static LIST_HEAD(hist_entry__sort_list);

static int sort_dimension__add(char *tok)
{
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
        struct sort_dimension *sd = &sort_dimensions[i];

        if (sd->taken)
            continue;

        if (strncasecmp(tok, sd->name, strlen(tok)))
            continue;

        if (sd->entry->collapse)
            sort__need_collapse = 1;

        list_add_tail(&sd->entry->list, &hist_entry__sort_list);
        sd->taken = 1;
    if (sym_hist_filter == NULL ||
        strcmp(sym->name, sym_hist_filter) == 0) {
        struct sym_priv *priv = dso__sym_priv(map->dso, sym);
        const int size = (sizeof(*priv->hist) +
                          (sym->end - sym->start) * sizeof(u64));

        priv->hist = malloc(size);
        if (priv->hist)
            memset(priv->hist, 0, size);
        return 0;
    }

    return -ESRCH;
}
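
The hunk above is part of builtin-annotate.c's local sort machinery, which compares two histogram entries by walking the list of sort_entry operations chosen via --sort until one comparator differentiates them. Purely as an illustration of that chained-comparator pattern, here is a standalone sketch with made-up types and names, not code from this commit:

#include <stdio.h>
#include <string.h>

struct entry { int pid; const char *comm; };

/* each sort dimension contributes one comparator, tried in the selected order */
typedef long long (*cmp_fn)(const struct entry *, const struct entry *);

static long long cmp_pid(const struct entry *l, const struct entry *r)
{
    return (long long)r->pid - (long long)l->pid;
}

static long long cmp_comm(const struct entry *l, const struct entry *r)
{
    return strcmp(l->comm, r->comm);
}

/* walk the chain until one comparator decides, like hist_entry__cmp() does */
static long long entry_cmp(cmp_fn *chain, int n,
                           const struct entry *l, const struct entry *r)
{
    long long cmp = 0;
    int i;

    for (i = 0; i < n; i++) {
        cmp = chain[i](l, r);
        if (cmp)
            break;
    }
    return cmp;
}

int main(void)
{
    struct entry a = { 10, "bash" }, b = { 10, "sshd" };
    cmp_fn chain[] = { cmp_pid, cmp_comm };    /* e.g. --sort pid,comm */

    /* pids tie, so the comm comparator decides */
    printf("%lld\n", entry_cmp(chain, 2, &a, &b));
    return 0;
}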
|
||||
|
||||
static int64_t
|
||||
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
|
||||
{
|
||||
struct sort_entry *se;
|
||||
int64_t cmp = 0;
|
||||
|
||||
list_for_each_entry(se, &hist_entry__sort_list, list) {
|
||||
cmp = se->cmp(left, right);
|
||||
if (cmp)
|
||||
break;
|
||||
}
|
||||
|
||||
return cmp;
|
||||
}
|
||||
|
||||
static int64_t
|
||||
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
|
||||
{
|
||||
struct sort_entry *se;
|
||||
int64_t cmp = 0;
|
||||
|
||||
list_for_each_entry(se, &hist_entry__sort_list, list) {
|
||||
int64_t (*f)(struct hist_entry *, struct hist_entry *);
|
||||
|
||||
f = se->collapse ?: se->cmp;
|
||||
|
||||
cmp = f(left, right);
|
||||
if (cmp)
|
||||
break;
|
||||
}
|
||||
|
||||
return cmp;
|
||||
/*
|
||||
* FIXME: We should really filter it out, as we don't want to go thru symbols
|
||||
* we're not interested, and if a DSO ends up with no symbols, delete it too,
|
||||
* but right now the kernel loading routines in symbol.c bail out if no symbols
|
||||
* are found, fix it later.
|
||||
*/
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -299,196 +84,60 @@ static void hist_hit(struct hist_entry *he, u64 ip)
|
||||
{
|
||||
unsigned int sym_size, offset;
|
||||
struct symbol *sym = he->sym;
|
||||
struct sym_priv *priv;
|
||||
struct sym_hist *h;
|
||||
|
||||
he->count++;
|
||||
|
||||
if (!sym || !sym->hist)
|
||||
if (!sym || !he->map)
|
||||
return;
|
||||
|
||||
priv = dso__sym_priv(he->map->dso, sym);
|
||||
if (!priv->hist)
|
||||
return;
|
||||
|
||||
sym_size = sym->end - sym->start;
|
||||
offset = ip - sym->start;
|
||||
|
||||
if (verbose)
|
||||
fprintf(stderr, "%s: ip=%Lx\n", __func__,
|
||||
he->map->unmap_ip(he->map, ip));
|
||||
|
||||
if (offset >= sym_size)
|
||||
return;
|
||||
|
||||
sym->hist_sum++;
|
||||
sym->hist[offset]++;
|
||||
h = priv->hist;
|
||||
h->sum++;
|
||||
h->ip[offset]++;
|
||||
|
||||
if (verbose >= 3)
|
||||
printf("%p %s: count++ [ip: %p, %08Lx] => %Ld\n",
|
||||
(void *)(unsigned long)he->sym->start,
|
||||
he->sym->name,
|
||||
(void *)(unsigned long)ip, ip - he->sym->start,
|
||||
sym->hist[offset]);
|
||||
h->ip[offset]);
|
||||
}
|
||||
|
||||
static int
|
||||
hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
|
||||
struct symbol *sym, u64 ip, char level)
|
||||
static int hist_entry__add(struct thread *thread, struct map *map,
|
||||
struct symbol *sym, u64 ip, u64 count, char level)
|
||||
{
|
||||
struct rb_node **p = &hist.rb_node;
|
||||
struct rb_node *parent = NULL;
|
||||
struct hist_entry *he;
|
||||
struct hist_entry entry = {
|
||||
.thread = thread,
|
||||
.map = map,
|
||||
.dso = dso,
|
||||
.sym = sym,
|
||||
.ip = ip,
|
||||
.level = level,
|
||||
.count = 1,
|
||||
};
|
||||
int cmp;
|
||||
|
||||
while (*p != NULL) {
|
||||
parent = *p;
|
||||
he = rb_entry(parent, struct hist_entry, rb_node);
|
||||
|
||||
cmp = hist_entry__cmp(&entry, he);
|
||||
|
||||
if (!cmp) {
|
||||
hist_hit(he, ip);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (cmp < 0)
|
||||
p = &(*p)->rb_left;
|
||||
else
|
||||
p = &(*p)->rb_right;
|
||||
}
|
||||
|
||||
he = malloc(sizeof(*he));
|
||||
if (!he)
|
||||
bool hit;
|
||||
struct hist_entry *he = __hist_entry__add(thread, map, sym, NULL, ip,
|
||||
count, level, &hit);
|
||||
if (he == NULL)
|
||||
return -ENOMEM;
|
||||
*he = entry;
|
||||
rb_link_node(&he->rb_node, parent, p);
|
||||
rb_insert_color(&he->rb_node, &hist);
|
||||
|
||||
hist_hit(he, ip);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void hist_entry__free(struct hist_entry *he)
|
||||
{
|
||||
free(he);
|
||||
}
|
||||
|
||||
/*
|
||||
* collapse the histogram
|
||||
*/
|
||||
|
||||
static struct rb_root collapse_hists;
|
||||
|
||||
static void collapse__insert_entry(struct hist_entry *he)
|
||||
{
|
||||
struct rb_node **p = &collapse_hists.rb_node;
|
||||
struct rb_node *parent = NULL;
|
||||
struct hist_entry *iter;
|
||||
int64_t cmp;
|
||||
|
||||
while (*p != NULL) {
|
||||
parent = *p;
|
||||
iter = rb_entry(parent, struct hist_entry, rb_node);
|
||||
|
||||
cmp = hist_entry__collapse(iter, he);
|
||||
|
||||
if (!cmp) {
|
||||
iter->count += he->count;
|
||||
hist_entry__free(he);
|
||||
return;
|
||||
}
|
||||
|
||||
if (cmp < 0)
|
||||
p = &(*p)->rb_left;
|
||||
else
|
||||
p = &(*p)->rb_right;
|
||||
}
|
||||
|
||||
rb_link_node(&he->rb_node, parent, p);
|
||||
rb_insert_color(&he->rb_node, &collapse_hists);
|
||||
}
|
||||
|
||||
static void collapse__resort(void)
|
||||
{
|
||||
struct rb_node *next;
|
||||
struct hist_entry *n;
|
||||
|
||||
if (!sort__need_collapse)
|
||||
return;
|
||||
|
||||
next = rb_first(&hist);
|
||||
while (next) {
|
||||
n = rb_entry(next, struct hist_entry, rb_node);
|
||||
next = rb_next(&n->rb_node);
|
||||
|
||||
rb_erase(&n->rb_node, &hist);
|
||||
collapse__insert_entry(n);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* reverse the map, sort on count.
|
||||
*/
|
||||
|
||||
static struct rb_root output_hists;
|
||||
|
||||
static void output__insert_entry(struct hist_entry *he)
|
||||
{
|
||||
struct rb_node **p = &output_hists.rb_node;
|
||||
struct rb_node *parent = NULL;
|
||||
struct hist_entry *iter;
|
||||
|
||||
while (*p != NULL) {
|
||||
parent = *p;
|
||||
iter = rb_entry(parent, struct hist_entry, rb_node);
|
||||
|
||||
if (he->count > iter->count)
|
||||
p = &(*p)->rb_left;
|
||||
else
|
||||
p = &(*p)->rb_right;
|
||||
}
|
||||
|
||||
rb_link_node(&he->rb_node, parent, p);
|
||||
rb_insert_color(&he->rb_node, &output_hists);
|
||||
}
|
||||
|
||||
static void output__resort(void)
|
||||
{
|
||||
struct rb_node *next;
|
||||
struct hist_entry *n;
|
||||
struct rb_root *tree = &hist;
|
||||
|
||||
if (sort__need_collapse)
|
||||
tree = &collapse_hists;
|
||||
|
||||
next = rb_first(tree);
|
||||
|
||||
while (next) {
|
||||
n = rb_entry(next, struct hist_entry, rb_node);
|
||||
next = rb_next(&n->rb_node);
|
||||
|
||||
rb_erase(&n->rb_node, tree);
|
||||
output__insert_entry(n);
|
||||
}
|
||||
}
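
collapse__resort() merges histogram entries that compare equal under the collapse comparators, and output__resort() then re-orders the merged entries by hit count for display. A hedged standalone sketch of the same two-pass idea, using flat arrays and qsort() instead of rbtrees; the types and names are illustrative, not perf's:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct hist { char comm[16]; unsigned long count; };

static int cmp_comm(const void *a, const void *b)
{
    return strcmp(((const struct hist *)a)->comm,
                  ((const struct hist *)b)->comm);
}

static int cmp_count_desc(const void *a, const void *b)
{
    const struct hist *l = a, *r = b;

    if (l->count == r->count)
        return 0;
    return l->count > r->count ? -1 : 1;
}

int main(void)
{
    struct hist h[] = {
        { "bash", 3 }, { "sshd", 5 }, { "bash", 2 },
    };
    size_t n = sizeof(h) / sizeof(h[0]), i, j = 0;

    /* "collapse": merge entries that are equal under the collapse key */
    qsort(h, n, sizeof(h[0]), cmp_comm);
    for (i = 1; i < n; i++) {
        if (!strcmp(h[j].comm, h[i].comm))
            h[j].count += h[i].count;    /* iter->count += he->count */
        else
            h[++j] = h[i];
    }
    n = j + 1;

    /* "output resort": order the merged entries by hit count */
    qsort(h, n, sizeof(h[0]), cmp_count_desc);
    for (i = 0; i < n; i++)
        printf("%8lu  %s\n", h[i].count, h[i].comm);
    return 0;
}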
|
||||
|
||||
static unsigned long total = 0,
|
||||
total_mmap = 0,
|
||||
total_comm = 0,
|
||||
total_fork = 0,
|
||||
total_unknown = 0;
|
||||
|
||||
static int
|
||||
process_sample_event(event_t *event, unsigned long offset, unsigned long head)
|
||||
{
|
||||
char level;
|
||||
int show = 0;
|
||||
struct dso *dso = NULL;
|
||||
struct thread *thread;
|
||||
u64 ip = event->ip.ip;
|
||||
struct map *map = NULL;
|
||||
|
||||
thread = threads__findnew(event->ip.pid, &threads, &last_match);
|
||||
struct symbol *sym = NULL;
|
||||
struct thread *thread = threads__findnew(event->ip.pid);
|
||||
|
||||
dump_printf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n",
|
||||
(void *)(offset + head),
|
||||
@ -497,60 +146,53 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
|
||||
event->ip.pid,
|
||||
(void *)(long)ip);
|
||||
|
||||
dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
|
||||
|
||||
if (thread == NULL) {
|
||||
fprintf(stderr, "problem processing %d event, skipping it.\n",
|
||||
event->header.type);
|
||||
return -1;
|
||||
}
|
||||
|
||||
dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
|
||||
|
||||
if (event->header.misc & PERF_RECORD_MISC_KERNEL) {
|
||||
show = SHOW_KERNEL;
|
||||
level = 'k';
|
||||
|
||||
dso = kernel_dso;
|
||||
|
||||
dump_printf(" ...... dso: %s\n", dso->name);
|
||||
|
||||
sym = kernel_maps__find_symbol(ip, &map);
|
||||
dump_printf(" ...... dso: %s\n",
|
||||
map ? map->dso->long_name : "<not found>");
|
||||
} else if (event->header.misc & PERF_RECORD_MISC_USER) {
|
||||
|
||||
show = SHOW_USER;
|
||||
level = '.';
|
||||
|
||||
map = thread__find_map(thread, ip);
|
||||
if (map != NULL) {
|
||||
got_map:
|
||||
ip = map->map_ip(map, ip);
|
||||
dso = map->dso;
|
||||
sym = map->dso->find_symbol(map->dso, ip);
|
||||
} else {
|
||||
/*
|
||||
* If this is outside of all known maps,
|
||||
* and is a negative address, try to look it
|
||||
* up in the kernel dso, as it might be a
|
||||
* vsyscall (which executes in user-mode):
|
||||
* vsyscall or vdso (which executes in user-mode).
|
||||
*
|
||||
* XXX This is nasty, we should have a symbol list in
|
||||
* the "[vdso]" dso, but for now lets use the old
|
||||
* trick of looking in the whole kernel symbol list.
|
||||
*/
|
||||
if ((long long)ip < 0)
|
||||
dso = kernel_dso;
|
||||
if ((long long)ip < 0) {
|
||||
map = kernel_map;
|
||||
goto got_map;
|
||||
}
|
||||
}
|
||||
dump_printf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
|
||||
|
||||
dump_printf(" ...... dso: %s\n",
|
||||
map ? map->dso->long_name : "<not found>");
|
||||
} else {
|
||||
show = SHOW_HV;
|
||||
level = 'H';
|
||||
dump_printf(" ...... dso: [hypervisor]\n");
|
||||
}
|
||||
|
||||
if (show & show_mask) {
|
||||
struct symbol *sym = NULL;
|
||||
|
||||
if (dso)
|
||||
sym = dso->find_symbol(dso, ip);
|
||||
|
||||
if (hist_entry__add(thread, map, dso, sym, ip, level)) {
|
||||
fprintf(stderr,
|
||||
"problem incrementing symbol count, skipping event\n");
|
||||
return -1;
|
||||
}
|
||||
if (hist_entry__add(thread, map, sym, ip, 1, level)) {
|
||||
fprintf(stderr, "problem incrementing symbol count, "
|
||||
"skipping event\n");
|
||||
return -1;
|
||||
}
|
||||
total++;
|
||||
|
||||
@ -560,10 +202,9 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
|
||||
static int
|
||||
process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
|
||||
{
|
||||
struct thread *thread;
|
||||
struct map *map = map__new(&event->mmap, NULL, 0);
|
||||
|
||||
thread = threads__findnew(event->mmap.pid, &threads, &last_match);
|
||||
struct map *map = map__new(&event->mmap, NULL, 0,
|
||||
sizeof(struct sym_priv), symbol_filter);
|
||||
struct thread *thread = threads__findnew(event->mmap.pid);
|
||||
|
||||
dump_printf("%p [%p]: PERF_RECORD_MMAP %d: [%p(%p) @ %p]: %s\n",
|
||||
(void *)(offset + head),
|
||||
@ -588,9 +229,8 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
|
||||
static int
|
||||
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
|
||||
{
|
||||
struct thread *thread;
|
||||
struct thread *thread = threads__findnew(event->comm.pid);
|
||||
|
||||
thread = threads__findnew(event->comm.pid, &threads, &last_match);
|
||||
dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n",
|
||||
(void *)(offset + head),
|
||||
(void *)(long)(event->header.size),
|
||||
@ -609,11 +249,9 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
|
||||
static int
|
||||
process_fork_event(event_t *event, unsigned long offset, unsigned long head)
|
||||
{
|
||||
struct thread *thread;
|
||||
struct thread *parent;
|
||||
struct thread *thread = threads__findnew(event->fork.pid);
|
||||
struct thread *parent = threads__findnew(event->fork.ppid);
|
||||
|
||||
thread = threads__findnew(event->fork.pid, &threads, &last_match);
|
||||
parent = threads__findnew(event->fork.ppid, &threads, &last_match);
|
||||
dump_printf("%p [%p]: PERF_RECORD_FORK: %d:%d\n",
|
||||
(void *)(offset + head),
|
||||
(void *)(long)(event->header.size),
|
||||
@ -665,14 +303,15 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
parse_line(FILE *file, struct symbol *sym, u64 start, u64 len)
|
||||
static int parse_line(FILE *file, struct hist_entry *he, u64 len)
|
||||
{
|
||||
struct symbol *sym = he->sym;
|
||||
char *line = NULL, *tmp, *tmp2;
|
||||
static const char *prev_line;
|
||||
static const char *prev_color;
|
||||
unsigned int offset;
|
||||
size_t line_len;
|
||||
u64 start;
|
||||
s64 line_ip;
|
||||
int ret;
|
||||
char *c;
|
||||
@ -709,22 +348,26 @@ parse_line(FILE *file, struct symbol *sym, u64 start, u64 len)
|
||||
line_ip = -1;
|
||||
}
|
||||
|
||||
start = he->map->unmap_ip(he->map, sym->start);
|
||||
|
||||
if (line_ip != -1) {
|
||||
const char *path = NULL;
|
||||
unsigned int hits = 0;
|
||||
double percent = 0.0;
|
||||
const char *color;
|
||||
struct sym_ext *sym_ext = sym->priv;
|
||||
struct sym_priv *priv = dso__sym_priv(he->map->dso, sym);
|
||||
struct sym_ext *sym_ext = priv->ext;
|
||||
struct sym_hist *h = priv->hist;
|
||||
|
||||
offset = line_ip - start;
|
||||
if (offset < len)
|
||||
hits = sym->hist[offset];
|
||||
hits = h->ip[offset];
|
||||
|
||||
if (offset < len && sym_ext) {
|
||||
path = sym_ext[offset].path;
|
||||
percent = sym_ext[offset].percent;
|
||||
} else if (sym->hist_sum)
|
||||
percent = 100.0 * hits / sym->hist_sum;
|
||||
} else if (h->sum)
|
||||
percent = 100.0 * hits / h->sum;
|
||||
|
||||
color = get_percent_color(percent);
|
||||
|
||||
@ -777,9 +420,10 @@ static void insert_source_line(struct sym_ext *sym_ext)
|
||||
rb_insert_color(&sym_ext->node, &root_sym_ext);
|
||||
}
|
||||
|
||||
static void free_source_line(struct symbol *sym, int len)
|
||||
static void free_source_line(struct hist_entry *he, int len)
|
||||
{
|
||||
struct sym_ext *sym_ext = sym->priv;
|
||||
struct sym_priv *priv = dso__sym_priv(he->map->dso, he->sym);
|
||||
struct sym_ext *sym_ext = priv->ext;
|
||||
int i;
|
||||
|
||||
if (!sym_ext)
|
||||
@ -789,26 +433,30 @@ static void free_source_line(struct symbol *sym, int len)
|
||||
free(sym_ext[i].path);
|
||||
free(sym_ext);
|
||||
|
||||
sym->priv = NULL;
|
||||
priv->ext = NULL;
|
||||
root_sym_ext = RB_ROOT;
|
||||
}
|
||||
|
||||
/* Get the filename:line for the colored entries */
|
||||
static void
|
||||
get_source_line(struct symbol *sym, u64 start, int len, const char *filename)
|
||||
get_source_line(struct hist_entry *he, int len, const char *filename)
|
||||
{
|
||||
struct symbol *sym = he->sym;
|
||||
u64 start;
|
||||
int i;
|
||||
char cmd[PATH_MAX * 2];
|
||||
struct sym_ext *sym_ext;
|
||||
struct sym_priv *priv = dso__sym_priv(he->map->dso, sym);
|
||||
struct sym_hist *h = priv->hist;
|
||||
|
||||
if (!sym->hist_sum)
|
||||
if (!h->sum)
|
||||
return;
|
||||
|
||||
sym->priv = calloc(len, sizeof(struct sym_ext));
|
||||
if (!sym->priv)
|
||||
sym_ext = priv->ext = calloc(len, sizeof(struct sym_ext));
|
||||
if (!priv->ext)
|
||||
return;
|
||||
|
||||
sym_ext = sym->priv;
|
||||
start = he->map->unmap_ip(he->map, sym->start);
|
||||
|
||||
for (i = 0; i < len; i++) {
|
||||
char *path = NULL;
|
||||
@ -816,7 +464,7 @@ get_source_line(struct symbol *sym, u64 start, int len, const char *filename)
|
||||
u64 offset;
|
||||
FILE *fp;
|
||||
|
||||
sym_ext[i].percent = 100.0 * sym->hist[i] / sym->hist_sum;
|
||||
sym_ext[i].percent = 100.0 * h->ip[i] / h->sum;
|
||||
if (sym_ext[i].percent <= 0.5)
|
||||
continue;
|
||||
|
||||
@ -870,33 +518,34 @@ static void print_summary(const char *filename)
|
||||
}
|
||||
}
|
||||
|
||||
static void annotate_sym(struct dso *dso, struct symbol *sym)
|
||||
static void annotate_sym(struct hist_entry *he)
|
||||
{
|
||||
const char *filename = dso->name, *d_filename;
|
||||
u64 start, end, len;
|
||||
struct map *map = he->map;
|
||||
struct dso *dso = map->dso;
|
||||
struct symbol *sym = he->sym;
|
||||
const char *filename = dso->long_name, *d_filename;
|
||||
u64 len;
|
||||
char command[PATH_MAX*2];
|
||||
FILE *file;
|
||||
|
||||
if (!filename)
|
||||
return;
|
||||
if (sym->module)
|
||||
filename = sym->module->path;
|
||||
else if (dso == kernel_dso)
|
||||
filename = vmlinux_name;
|
||||
|
||||
start = sym->obj_start;
|
||||
if (!start)
|
||||
start = sym->start;
|
||||
if (verbose)
|
||||
fprintf(stderr, "%s: filename=%s, sym=%s, start=%Lx, end=%Lx\n",
|
||||
__func__, filename, sym->name,
|
||||
map->unmap_ip(map, sym->start),
|
||||
map->unmap_ip(map, sym->end));
|
||||
|
||||
if (full_paths)
|
||||
d_filename = filename;
|
||||
else
|
||||
d_filename = basename(filename);
|
||||
|
||||
end = start + sym->end - sym->start + 1;
|
||||
len = sym->end - sym->start;
|
||||
|
||||
if (print_line) {
|
||||
get_source_line(sym, start, len, filename);
|
||||
get_source_line(he, len, filename);
|
||||
print_summary(filename);
|
||||
}
|
||||
|
||||
@ -905,10 +554,12 @@ static void annotate_sym(struct dso *dso, struct symbol *sym)
|
||||
printf("------------------------------------------------\n");
|
||||
|
||||
if (verbose >= 2)
|
||||
printf("annotating [%p] %30s : [%p] %30s\n", dso, dso->name, sym, sym->name);
|
||||
printf("annotating [%p] %30s : [%p] %30s\n",
|
||||
dso, dso->long_name, sym, sym->name);
|
||||
|
||||
sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s",
|
||||
(u64)start, (u64)end, filename, filename);
|
||||
map->unmap_ip(map, sym->start), map->unmap_ip(map, sym->end),
|
||||
filename, filename);
|
||||
|
||||
if (verbose >= 3)
|
||||
printf("doing: %s\n", command);
|
||||
@ -918,35 +569,38 @@ static void annotate_sym(struct dso *dso, struct symbol *sym)
|
||||
return;
|
||||
|
||||
while (!feof(file)) {
|
||||
if (parse_line(file, sym, start, len) < 0)
|
||||
if (parse_line(file, he, len) < 0)
|
||||
break;
|
||||
}
|
||||
|
||||
pclose(file);
|
||||
if (print_line)
|
||||
free_source_line(sym, len);
|
||||
free_source_line(he, len);
|
||||
}
|
||||
|
||||
static void find_annotations(void)
|
||||
{
|
||||
struct rb_node *nd;
|
||||
struct dso *dso;
|
||||
int count = 0;
|
||||
|
||||
list_for_each_entry(dso, &dsos, node) {
|
||||
for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) {
|
||||
struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
|
||||
struct sym_priv *priv;
|
||||
|
||||
for (nd = rb_first(&dso->syms); nd; nd = rb_next(nd)) {
|
||||
struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
|
||||
if (he->sym == NULL)
|
||||
continue;
|
||||
|
||||
if (sym->hist) {
|
||||
annotate_sym(dso, sym);
|
||||
count++;
|
||||
}
|
||||
}
|
||||
priv = dso__sym_priv(he->map->dso, he->sym);
|
||||
if (priv->hist == NULL)
|
||||
continue;
|
||||
|
||||
annotate_sym(he);
|
||||
/*
|
||||
* Since we have a hist_entry per IP for the same symbol, free
|
||||
* he->sym->hist to signal we already processed this symbol.
|
||||
*/
|
||||
free(priv->hist);
|
||||
priv->hist = NULL;
|
||||
}
|
||||
|
||||
if (!count)
|
||||
printf(" Error: symbol '%s' not present amongst the samples.\n", sym_hist_filter);
|
||||
}
|
||||
|
||||
static int __cmd_annotate(void)
|
||||
@ -959,7 +613,7 @@ static int __cmd_annotate(void)
|
||||
uint32_t size;
|
||||
char *buf;
|
||||
|
||||
register_idle_thread(&threads, &last_match);
|
||||
register_idle_thread();
|
||||
|
||||
input = open(input_name, O_RDONLY);
|
||||
if (input < 0) {
|
||||
@ -983,7 +637,7 @@ static int __cmd_annotate(void)
|
||||
exit(0);
|
||||
}
|
||||
|
||||
if (load_kernel() < 0) {
|
||||
if (load_kernel(sizeof(struct sym_priv), symbol_filter) < 0) {
|
||||
perror("failed to load kernel symbols");
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
@ -1059,14 +713,14 @@ more:
|
||||
if (dump_trace)
|
||||
return 0;
|
||||
|
||||
if (verbose >= 3)
|
||||
threads__fprintf(stdout, &threads);
|
||||
if (verbose > 3)
|
||||
threads__fprintf(stdout);
|
||||
|
||||
if (verbose >= 2)
|
||||
if (verbose > 2)
|
||||
dsos__fprintf(stdout);
|
||||
|
||||
collapse__resort();
|
||||
output__resort();
|
||||
output__resort(total);
|
||||
|
||||
find_annotations();
|
||||
|
||||
@ -1134,10 +788,13 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used)
|
||||
sym_hist_filter = argv[0];
|
||||
}
|
||||
|
||||
if (!sym_hist_filter)
|
||||
usage_with_options(annotate_usage, options);
|
||||
|
||||
setup_pager();
|
||||
|
||||
if (field_sep && *field_sep == '.') {
|
||||
fputs("'.' is the only non valid --field-separator argument\n",
|
||||
stderr);
|
||||
exit(129);
|
||||
}
|
||||
|
||||
return __cmd_annotate();
|
||||
}
|
||||
|
@ -17,55 +17,51 @@
|
||||
#include "util/header.h"
|
||||
#include "util/event.h"
|
||||
#include "util/debug.h"
|
||||
#include "util/trace-event.h"
|
||||
|
||||
#include <unistd.h>
|
||||
#include <sched.h>
|
||||
|
||||
#define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1)
|
||||
#define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask))
|
||||
|
||||
static int fd[MAX_NR_CPUS][MAX_COUNTERS];
|
||||
|
||||
static long default_interval = 100000;
|
||||
static long default_interval = 0;
|
||||
|
||||
static int nr_cpus = 0;
|
||||
static int nr_cpus = 0;
|
||||
static unsigned int page_size;
|
||||
static unsigned int mmap_pages = 128;
|
||||
static int freq = 0;
|
||||
static unsigned int mmap_pages = 128;
|
||||
static int freq = 1000;
|
||||
static int output;
|
||||
static const char *output_name = "perf.data";
|
||||
static int group = 0;
|
||||
static unsigned int realtime_prio = 0;
|
||||
static int raw_samples = 0;
|
||||
static int system_wide = 0;
|
||||
static int profile_cpu = -1;
|
||||
static pid_t target_pid = -1;
|
||||
static pid_t child_pid = -1;
|
||||
static int inherit = 1;
|
||||
static int force = 0;
|
||||
static int append_file = 0;
|
||||
static int call_graph = 0;
|
||||
static int inherit_stat = 0;
|
||||
static int no_samples = 0;
|
||||
static int sample_address = 0;
|
||||
static int multiplex = 0;
|
||||
static int multiplex_fd = -1;
|
||||
static int group = 0;
|
||||
static unsigned int realtime_prio = 0;
|
||||
static int raw_samples = 0;
|
||||
static int system_wide = 0;
|
||||
static int profile_cpu = -1;
|
||||
static pid_t target_pid = -1;
|
||||
static pid_t child_pid = -1;
|
||||
static int inherit = 1;
|
||||
static int force = 0;
|
||||
static int append_file = 0;
|
||||
static int call_graph = 0;
|
||||
static int inherit_stat = 0;
|
||||
static int no_samples = 0;
|
||||
static int sample_address = 0;
|
||||
static int multiplex = 0;
|
||||
static int multiplex_fd = -1;
|
||||
|
||||
static long samples;
|
||||
static long samples = 0;
|
||||
static struct timeval last_read;
|
||||
static struct timeval this_read;
|
||||
|
||||
static u64 bytes_written;
|
||||
static u64 bytes_written = 0;
|
||||
|
||||
static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
|
||||
|
||||
static int nr_poll;
|
||||
static int nr_cpu;
|
||||
static int nr_poll = 0;
|
||||
static int nr_cpu = 0;
|
||||
|
||||
static int file_new = 1;
|
||||
static int file_new = 1;
|
||||
|
||||
struct perf_header *header;
|
||||
struct perf_header *header = NULL;
|
||||
|
||||
struct mmap_data {
|
||||
int counter;
|
||||
@ -375,9 +371,11 @@ static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int n
|
||||
|
||||
static void create_counter(int counter, int cpu, pid_t pid)
|
||||
{
|
||||
char *filter = filters[counter];
|
||||
struct perf_event_attr *attr = attrs + counter;
|
||||
struct perf_header_attr *h_attr;
|
||||
int track = !counter; /* only the first counter needs these */
|
||||
int ret;
|
||||
struct {
|
||||
u64 count;
|
||||
u64 time_enabled;
|
||||
@ -480,7 +478,6 @@ try_again:
|
||||
multiplex_fd = fd[nr_cpu][counter];
|
||||
|
||||
if (multiplex && fd[nr_cpu][counter] != multiplex_fd) {
|
||||
int ret;
|
||||
|
||||
ret = ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_SET_OUTPUT, multiplex_fd);
|
||||
assert(ret != -1);
|
||||
@ -500,6 +497,16 @@ try_again:
|
||||
}
|
||||
}

    if (filter != NULL) {
        ret = ioctl(fd[nr_cpu][counter],
                    PERF_EVENT_IOC_SET_FILTER, filter);
        if (ret) {
            error("failed to set filter with %d (%s)\n", errno,
                  strerror(errno));
            exit(-1);
        }
    }

    ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_ENABLE);
}
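
This hunk forwards the new --filter string to the kernel through the PERF_EVENT_IOC_SET_FILTER ioctl before the counter is enabled. For context, a minimal standalone sketch of that ioctl; the tracepoint id below is a placeholder that would normally be read from debugfs (.../tracing/events/<subsystem>/<event>/id), and the syscall wrapper is spelled out because glibc ships none:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                           int cpu, int group_fd, unsigned long flags)
{
    return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
    struct perf_event_attr attr;
    int fd;

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.type = PERF_TYPE_TRACEPOINT;
    attr.config = 1234;        /* placeholder: tracepoint id from debugfs */
    attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_TIME;
    attr.sample_period = 1;
    attr.disabled = 1;

    fd = perf_event_open(&attr, 0, -1, -1, 0);    /* this task, any CPU */
    if (fd < 0) {
        perror("perf_event_open");
        return 1;
    }

    /* ask the kernel to drop samples that do not match the expression */
    if (ioctl(fd, PERF_EVENT_IOC_SET_FILTER, "common_pid != 0"))
        perror("PERF_EVENT_IOC_SET_FILTER");

    ioctl(fd, PERF_EVENT_IOC_ENABLE);
    /* ... read or mmap samples here ... */
    close(fd);
    return 0;
}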
|
||||
|
||||
@ -566,17 +573,17 @@ static int __cmd_record(int argc, const char **argv)
|
||||
else
|
||||
header = perf_header__new();
|
||||
|
||||
|
||||
if (raw_samples) {
|
||||
read_tracing_data(attrs, nr_counters);
|
||||
perf_header__feat_trace_info(header);
|
||||
} else {
|
||||
for (i = 0; i < nr_counters; i++) {
|
||||
if (attrs[i].sample_type & PERF_SAMPLE_RAW) {
|
||||
read_tracing_data(attrs, nr_counters);
|
||||
perf_header__feat_trace_info(header);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
atexit(atexit_header);
|
||||
|
||||
if (!system_wide) {
|
||||
@ -623,7 +630,7 @@ static int __cmd_record(int argc, const char **argv)
|
||||
|
||||
param.sched_priority = realtime_prio;
|
||||
if (sched_setscheduler(0, SCHED_FIFO, ¶m)) {
|
||||
printf("Could not set realtime priority.\n");
|
||||
pr_err("Could not set realtime priority.\n");
|
||||
exit(-1);
|
||||
}
|
||||
}
|
||||
@ -677,6 +684,8 @@ static const struct option options[] = {
|
||||
OPT_CALLBACK('e', "event", NULL, "event",
|
||||
"event selector. use 'perf list' to list available events",
|
||||
parse_events),
|
||||
OPT_CALLBACK(0, "filter", NULL, "filter",
|
||||
"event filter", parse_filter),
|
||||
OPT_INTEGER('p', "pid", &target_pid,
|
||||
"record events on existing pid"),
|
||||
OPT_INTEGER('r', "realtime", &realtime_prio,
|
||||
@ -731,6 +740,18 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
|
||||
attrs[0].config = PERF_COUNT_HW_CPU_CYCLES;
|
||||
}

    /*
     * User specified count overrides default frequency.
     */
    if (default_interval)
        freq = 0;
    else if (freq) {
        default_interval = freq;
    } else {
        fprintf(stderr, "frequency and count are zero, aborting\n");
        exit(EXIT_FAILURE);
    }

    for (counter = 0; counter < nr_counters; counter++) {
        if (attrs[counter].sample_period)
            continue;
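
With the default interval now zero, a user-supplied count wins over the default 1000 Hz frequency, and only if both are zero does the tool abort. A hedged sketch of how the two modes end up in perf_event_attr (the attr field names are the real ones; the helper around them is illustrative):

#include <stdio.h>
#include <string.h>
#include <linux/perf_event.h>

/* illustrative helper: turn "-c <count>" vs. "-F <freq>" into attr fields */
static void setup_sampling(struct perf_event_attr *attr,
                           unsigned long long count, unsigned long long freq)
{
    memset(attr, 0, sizeof(*attr));
    attr->size = sizeof(*attr);
    attr->type = PERF_TYPE_HARDWARE;
    attr->config = PERF_COUNT_HW_CPU_CYCLES;
    attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;

    if (count) {
        /* fixed count: one sample every 'count' events */
        attr->sample_period = count;
    } else {
        /* frequency mode: the kernel adjusts the period to hit 'freq' Hz */
        attr->freq = 1;
        attr->sample_freq = freq;
        attr->sample_type |= PERF_SAMPLE_PERIOD;
    }
}

int main(void)
{
    struct perf_event_attr attr;

    setup_sampling(&attr, 0, 1000);    /* no count given: default to 1000 Hz */
    printf("freq mode: %d, sample_freq: %llu\n",
           (int)attr.freq, (unsigned long long)attr.sample_freq);
    return 0;
}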
File diff suppressed because it is too large
@ -11,6 +11,7 @@
|
||||
#include "util/trace-event.h"
|
||||
|
||||
#include "util/debug.h"
|
||||
#include "util/data_map.h"
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <sys/prctl.h>
|
||||
@ -20,26 +21,23 @@
|
||||
#include <math.h>
|
||||
|
||||
static char const *input_name = "perf.data";
|
||||
static int input;
|
||||
static unsigned long page_size;
|
||||
static unsigned long mmap_window = 32;
|
||||
|
||||
static unsigned long total_comm = 0;
|
||||
|
||||
static struct rb_root threads;
|
||||
static struct thread *last_match;
|
||||
|
||||
static struct perf_header *header;
|
||||
static u64 sample_type;
|
||||
|
||||
static char default_sort_order[] = "avg, max, switch, runtime";
|
||||
static char *sort_order = default_sort_order;
|
||||
|
||||
static int profile_cpu = -1;
|
||||
|
||||
static char *cwd;
|
||||
static int cwdlen;
|
||||
|
||||
#define PR_SET_NAME 15 /* Set process name */
|
||||
#define MAX_CPUS 4096
|
||||
|
||||
#define BUG_ON(x) assert(!(x))
|
||||
|
||||
static u64 run_measurement_overhead;
|
||||
static u64 sleep_measurement_overhead;
|
||||
|
||||
@ -74,6 +72,7 @@ enum sched_event_type {
|
||||
SCHED_EVENT_RUN,
|
||||
SCHED_EVENT_SLEEP,
|
||||
SCHED_EVENT_WAKEUP,
|
||||
SCHED_EVENT_MIGRATION,
|
||||
};
|
||||
|
||||
struct sched_atom {
|
||||
@ -398,6 +397,8 @@ process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
|
||||
ret = sem_post(atom->wait_sem);
|
||||
BUG_ON(ret);
|
||||
break;
|
||||
case SCHED_EVENT_MIGRATION:
|
||||
break;
|
||||
default:
|
||||
BUG_ON(1);
|
||||
}
|
||||
@ -635,9 +636,7 @@ static void test_calibrations(void)
|
||||
static int
|
||||
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
|
||||
{
|
||||
struct thread *thread;
|
||||
|
||||
thread = threads__findnew(event->comm.pid, &threads, &last_match);
|
||||
struct thread *thread = threads__findnew(event->comm.tid);
|
||||
|
||||
dump_printf("%p [%p]: perf_event_comm: %s:%d\n",
|
||||
(void *)(offset + head),
|
||||
@ -745,6 +744,22 @@ struct trace_fork_event {
|
||||
u32 child_pid;
|
||||
};
|
||||
|
||||
struct trace_migrate_task_event {
|
||||
u32 size;
|
||||
|
||||
u16 common_type;
|
||||
u8 common_flags;
|
||||
u8 common_preempt_count;
|
||||
u32 common_pid;
|
||||
u32 common_tgid;
|
||||
|
||||
char comm[16];
|
||||
u32 pid;
|
||||
|
||||
u32 prio;
|
||||
u32 cpu;
|
||||
};
|
||||
|
||||
struct trace_sched_handler {
|
||||
void (*switch_event)(struct trace_switch_event *,
|
||||
struct event *,
|
||||
@ -769,6 +784,12 @@ struct trace_sched_handler {
|
||||
int cpu,
|
||||
u64 timestamp,
|
||||
struct thread *thread);
|
||||
|
||||
void (*migrate_task_event)(struct trace_migrate_task_event *,
|
||||
struct event *,
|
||||
int cpu,
|
||||
u64 timestamp,
|
||||
struct thread *thread);
|
||||
};
|
||||
|
||||
|
||||
@ -1058,8 +1079,8 @@ latency_switch_event(struct trace_switch_event *switch_event,
|
||||
die("hm, delta: %Ld < 0 ?\n", delta);
|
||||
|
||||
|
||||
sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
|
||||
sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);
|
||||
sched_out = threads__findnew(switch_event->prev_pid);
|
||||
sched_in = threads__findnew(switch_event->next_pid);
|
||||
|
||||
out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
|
||||
if (!out_events) {
|
||||
@ -1092,13 +1113,10 @@ latency_runtime_event(struct trace_runtime_event *runtime_event,
|
||||
u64 timestamp,
|
||||
struct thread *this_thread __used)
|
||||
{
|
||||
struct work_atoms *atoms;
|
||||
struct thread *thread;
|
||||
struct thread *thread = threads__findnew(runtime_event->pid);
|
||||
struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
|
||||
|
||||
BUG_ON(cpu >= MAX_CPUS || cpu < 0);
|
||||
|
||||
thread = threads__findnew(runtime_event->pid, &threads, &last_match);
|
||||
atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
|
||||
if (!atoms) {
|
||||
thread_atoms_insert(thread);
|
||||
atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
|
||||
@ -1125,7 +1143,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
|
||||
if (!wakeup_event->success)
|
||||
return;
|
||||
|
||||
wakee = threads__findnew(wakeup_event->pid, &threads, &last_match);
|
||||
wakee = threads__findnew(wakeup_event->pid);
|
||||
atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
|
||||
if (!atoms) {
|
||||
thread_atoms_insert(wakee);
|
||||
@ -1139,7 +1157,12 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
|
||||
|
||||
atom = list_entry(atoms->work_list.prev, struct work_atom, list);

    if (atom->state != THREAD_SLEEPING)
    /*
     * You WILL be missing events if you've recorded only
     * one CPU, or are only looking at only one, so don't
     * make useless noise.
     */
    if (profile_cpu == -1 && atom->state != THREAD_SLEEPING)
        nr_state_machine_bugs++;

    nr_timestamps++;
|
||||
@ -1152,11 +1175,51 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
|
||||
atom->wake_up_time = timestamp;
|
||||
}

static void
latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
                           struct event *__event __used,
                           int cpu __used,
                           u64 timestamp,
                           struct thread *thread __used)
{
    struct work_atoms *atoms;
    struct work_atom *atom;
    struct thread *migrant;

    /*
     * Only need to worry about migration when profiling one CPU.
     */
    if (profile_cpu == -1)
        return;

    migrant = threads__findnew(migrate_task_event->pid);
    atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
    if (!atoms) {
        thread_atoms_insert(migrant);
        register_pid(migrant->pid, migrant->comm);
        atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
        if (!atoms)
            die("migration-event: Internal tree error");
        add_sched_out_event(atoms, 'R', timestamp);
    }

    BUG_ON(list_empty(&atoms->work_list));

    atom = list_entry(atoms->work_list.prev, struct work_atom, list);
    atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

    nr_timestamps++;

    if (atom->sched_out_time > timestamp)
        nr_unordered_timestamps++;
}

static struct trace_sched_handler lat_ops = {
    .wakeup_event       = latency_wakeup_event,
    .switch_event       = latency_switch_event,
    .runtime_event      = latency_runtime_event,
    .fork_event         = latency_fork_event,
    .migrate_task_event = latency_migrate_task_event,
};
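
lat_ops is one of the callback tables in perf sched; process_raw_event() picks the callback by tracepoint name and simply skips callbacks a mode leaves NULL, which is how the new migrate_task_event handler slots in. A self-contained sketch of that dispatch-by-name pattern, with stand-in types rather than the perf ones:

#include <stdio.h>
#include <string.h>

struct sched_event { const char *name; int pid; };

/* a table of optional callbacks, in the spirit of struct trace_sched_handler */
struct sched_handler {
    void (*wakeup_event)(const struct sched_event *);
    void (*migrate_task_event)(const struct sched_event *);
};

static void latency_wakeup(const struct sched_event *ev)
{
    printf("wakeup of pid %d\n", ev->pid);
}

static const struct sched_handler lat_handler = {
    .wakeup_event = latency_wakeup,
    /* .migrate_task_event left NULL: this mode ignores migrations */
};

/* dispatch by event name, skipping callbacks a handler does not implement */
static void process_event(const struct sched_handler *h,
                          const struct sched_event *ev)
{
    if (!strcmp(ev->name, "sched_wakeup") && h->wakeup_event)
        h->wakeup_event(ev);
    if (!strcmp(ev->name, "sched_migrate_task") && h->migrate_task_event)
        h->migrate_task_event(ev);
}

int main(void)
{
    struct sched_event ev = { "sched_wakeup", 42 };

    process_event(&lat_handler, &ev);
    return 0;
}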
|
||||
|
||||
static void output_lat_thread(struct work_atoms *work_list)
|
||||
@ -1385,8 +1448,8 @@ map_switch_event(struct trace_switch_event *switch_event,
|
||||
die("hm, delta: %Ld < 0 ?\n", delta);
|
||||
|
||||
|
||||
sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
|
||||
sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);
|
||||
sched_out = threads__findnew(switch_event->prev_pid);
|
||||
sched_in = threads__findnew(switch_event->next_pid);
|
||||
|
||||
curr_thread[this_cpu] = sched_in;
|
||||
|
||||
@ -1516,6 +1579,26 @@ process_sched_exit_event(struct event *event,
|
||||
printf("sched_exit event %p\n", event);
|
||||
}
|
||||
|
||||
static void
|
||||
process_sched_migrate_task_event(struct raw_event_sample *raw,
|
||||
struct event *event,
|
||||
int cpu __used,
|
||||
u64 timestamp __used,
|
||||
struct thread *thread __used)
|
||||
{
|
||||
struct trace_migrate_task_event migrate_task_event;
|
||||
|
||||
FILL_COMMON_FIELDS(migrate_task_event, event, raw->data);
|
||||
|
||||
FILL_ARRAY(migrate_task_event, comm, event, raw->data);
|
||||
FILL_FIELD(migrate_task_event, pid, event, raw->data);
|
||||
FILL_FIELD(migrate_task_event, prio, event, raw->data);
|
||||
FILL_FIELD(migrate_task_event, cpu, event, raw->data);
|
||||
|
||||
if (trace_handler->migrate_task_event)
|
||||
trace_handler->migrate_task_event(&migrate_task_event, event, cpu, timestamp, thread);
|
||||
}
|
||||
|
||||
static void
|
||||
process_raw_event(event_t *raw_event __used, void *more_data,
|
||||
int cpu, u64 timestamp, struct thread *thread)
|
||||
@ -1539,23 +1622,24 @@ process_raw_event(event_t *raw_event __used, void *more_data,
|
||||
process_sched_fork_event(raw, event, cpu, timestamp, thread);
|
||||
if (!strcmp(event->name, "sched_process_exit"))
|
||||
process_sched_exit_event(event, cpu, timestamp, thread);
|
||||
if (!strcmp(event->name, "sched_migrate_task"))
|
||||
process_sched_migrate_task_event(raw, event, cpu, timestamp, thread);
|
||||
}
|
||||
|
||||
static int
|
||||
process_sample_event(event_t *event, unsigned long offset, unsigned long head)
|
||||
{
|
||||
char level;
|
||||
int show = 0;
|
||||
struct dso *dso = NULL;
|
||||
struct thread *thread;
|
||||
u64 ip = event->ip.ip;
|
||||
u64 timestamp = -1;
|
||||
u32 cpu = -1;
|
||||
u64 period = 1;
|
||||
void *more_data = event->ip.__more_data;
|
||||
int cpumode;
|
||||
|
||||
thread = threads__findnew(event->ip.pid, &threads, &last_match);
|
||||
if (!(sample_type & PERF_SAMPLE_RAW))
|
||||
return 0;
|
||||
|
||||
thread = threads__findnew(event->ip.pid);
|
||||
|
||||
if (sample_type & PERF_SAMPLE_TIME) {
|
||||
timestamp = *(u64 *)more_data;
|
||||
@ -1581,169 +1665,60 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
|
||||
(void *)(long)ip,
|
||||
(long long)period);
|
||||
|
||||
dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
|
||||
|
||||
if (thread == NULL) {
|
||||
eprintf("problem processing %d event, skipping it.\n",
|
||||
event->header.type);
|
||||
pr_debug("problem processing %d event, skipping it.\n",
|
||||
event->header.type);
|
||||
return -1;
|
||||
}
|
||||
|
||||
cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
|
||||
dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
|
||||
|
||||
if (cpumode == PERF_RECORD_MISC_KERNEL) {
|
||||
show = SHOW_KERNEL;
|
||||
level = 'k';
|
||||
if (profile_cpu != -1 && profile_cpu != (int) cpu)
|
||||
return 0;
|
||||
|
||||
dso = kernel_dso;
|
||||
|
||||
dump_printf(" ...... dso: %s\n", dso->name);
|
||||
|
||||
} else if (cpumode == PERF_RECORD_MISC_USER) {
|
||||
|
||||
show = SHOW_USER;
|
||||
level = '.';
|
||||
|
||||
} else {
|
||||
show = SHOW_HV;
|
||||
level = 'H';
|
||||
|
||||
dso = hypervisor_dso;
|
||||
|
||||
dump_printf(" ...... dso: [hypervisor]\n");
|
||||
}
|
||||
|
||||
if (sample_type & PERF_SAMPLE_RAW)
|
||||
process_raw_event(event, more_data, cpu, timestamp, thread);
|
||||
process_raw_event(event, more_data, cpu, timestamp, thread);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
process_event(event_t *event, unsigned long offset, unsigned long head)
|
||||
process_lost_event(event_t *event __used,
|
||||
unsigned long offset __used,
|
||||
unsigned long head __used)
|
||||
{
|
||||
trace_event(event);
|
||||
nr_lost_chunks++;
|
||||
nr_lost_events += event->lost.lost;
|
||||
|
||||
nr_events++;
|
||||
switch (event->header.type) {
|
||||
case PERF_RECORD_MMAP:
|
||||
return 0;
|
||||
case PERF_RECORD_LOST:
|
||||
nr_lost_chunks++;
|
||||
nr_lost_events += event->lost.lost;
|
||||
return 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
case PERF_RECORD_COMM:
|
||||
return process_comm_event(event, offset, head);
|
||||
static int sample_type_check(u64 type)
|
||||
{
|
||||
sample_type = type;
|
||||
|
||||
case PERF_RECORD_EXIT ... PERF_RECORD_READ:
|
||||
return 0;
|
||||
|
||||
case PERF_RECORD_SAMPLE:
|
||||
return process_sample_event(event, offset, head);
|
||||
|
||||
case PERF_RECORD_MAX:
|
||||
default:
|
||||
if (!(sample_type & PERF_SAMPLE_RAW)) {
|
||||
fprintf(stderr,
|
||||
"No trace sample to read. Did you call perf record "
|
||||
"without -R?");
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct perf_file_handler file_handler = {
|
||||
.process_sample_event = process_sample_event,
|
||||
.process_comm_event = process_comm_event,
|
||||
.process_lost_event = process_lost_event,
|
||||
.sample_type_check = sample_type_check,
|
||||
};
|
||||
|
||||
static int read_events(void)
|
||||
{
|
||||
int ret, rc = EXIT_FAILURE;
|
||||
unsigned long offset = 0;
|
||||
unsigned long head = 0;
|
||||
struct stat perf_stat;
|
||||
event_t *event;
|
||||
uint32_t size;
|
||||
char *buf;
|
||||
register_idle_thread();
|
||||
register_perf_file_handler(&file_handler);
|
||||
|
||||
trace_report();
|
||||
register_idle_thread(&threads, &last_match);
|
||||
|
||||
input = open(input_name, O_RDONLY);
|
||||
if (input < 0) {
|
||||
perror("failed to open file");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
ret = fstat(input, &perf_stat);
|
||||
if (ret < 0) {
|
||||
perror("failed to stat file");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
if (!perf_stat.st_size) {
|
||||
fprintf(stderr, "zero-sized file, nothing to do!\n");
|
||||
exit(0);
|
||||
}
|
||||
header = perf_header__read(input);
|
||||
head = header->data_offset;
|
||||
sample_type = perf_header__sample_type(header);
|
||||
|
||||
if (!(sample_type & PERF_SAMPLE_RAW))
|
||||
die("No trace sample to read. Did you call perf record "
|
||||
"without -R?");
|
||||
|
||||
if (load_kernel() < 0) {
|
||||
perror("failed to load kernel symbols");
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
||||
remap:
|
||||
buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
|
||||
MAP_SHARED, input, offset);
|
||||
if (buf == MAP_FAILED) {
|
||||
perror("failed to mmap file");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
more:
|
||||
event = (event_t *)(buf + head);
|
||||
|
||||
size = event->header.size;
|
||||
if (!size)
|
||||
size = 8;
|
||||
|
||||
if (head + event->header.size >= page_size * mmap_window) {
|
||||
unsigned long shift = page_size * (head / page_size);
|
||||
int res;
|
||||
|
||||
res = munmap(buf, page_size * mmap_window);
|
||||
assert(res == 0);
|
||||
|
||||
offset += shift;
|
||||
head -= shift;
|
||||
goto remap;
|
||||
}
|
||||
|
||||
size = event->header.size;
|
||||
|
||||
|
||||
if (!size || process_event(event, offset, head) < 0) {
|
||||
|
||||
/*
|
||||
* assume we lost track of the stream, check alignment, and
|
||||
* increment a single u64 in the hope to catch on again 'soon'.
|
||||
*/
|
||||
|
||||
if (unlikely(head & 7))
|
||||
head &= ~7ULL;
|
||||
|
||||
size = 8;
|
||||
}
|
||||
|
||||
head += size;
|
||||
|
||||
if (offset + head < (unsigned long)perf_stat.st_size)
|
||||
goto more;
|
||||
|
||||
rc = EXIT_SUCCESS;
|
||||
close(input);
|
||||
|
||||
return rc;
|
||||
return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd);
|
||||
}
|
||||
|
||||
static void print_bad_events(void)
|
||||
@ -1883,6 +1858,8 @@ static const struct option latency_options[] = {
|
||||
"sort by key(s): runtime, switch, avg, max"),
|
||||
OPT_BOOLEAN('v', "verbose", &verbose,
|
||||
"be more verbose (show symbol address, etc)"),
|
||||
OPT_INTEGER('C', "CPU", &profile_cpu,
|
||||
"CPU to profile on"),
|
||||
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
|
||||
"dump raw trace in ASCII"),
|
||||
OPT_END()
|
||||
@ -1961,7 +1938,6 @@ static int __cmd_record(int argc, const char **argv)
|
||||
int cmd_sched(int argc, const char **argv, const char *prefix __used)
|
||||
{
|
||||
symbol__init();
|
||||
page_size = getpagesize();
|
||||
|
||||
argc = parse_options(argc, argv, sched_options, sched_usage,
|
||||
PARSE_OPT_STOP_AT_NON_OPTION);
|
||||
|
@ -50,15 +50,17 @@

static struct perf_event_attr default_attrs[] = {

  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },

  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES },

};
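
These are the counters perf stat programs when no -e option is given; each row becomes its own perf_event_open() file descriptor whose total is read back once the workload exits. A hedged sketch of counting a single such event around a stretch of code (error handling trimmed, and the syscall wrapper written out since glibc provides none):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                           int cpu, int group_fd, unsigned long flags)
{
    return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
    struct perf_event_attr attr;
    unsigned long long count;
    volatile unsigned long i, sum = 0;
    int fd;

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.type = PERF_TYPE_HARDWARE;        /* one row of default_attrs */
    attr.config = PERF_COUNT_HW_INSTRUCTIONS;
    attr.disabled = 1;

    fd = perf_event_open(&attr, 0, -1, -1, 0);    /* count this task only */
    if (fd < 0) {
        perror("perf_event_open");
        return 1;
    }

    ioctl(fd, PERF_EVENT_IOC_ENABLE);
    for (i = 0; i < 1000000; i++)          /* the "workload" */
        sum += i;
    ioctl(fd, PERF_EVENT_IOC_DISABLE);

    if (read(fd, &count, sizeof(count)) == sizeof(count))
        printf("%llu instructions\n", count);
    close(fd);
    return 0;
}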
|
||||
|
||||
@ -125,6 +127,7 @@ struct stats event_res_stats[MAX_COUNTERS][3];
|
||||
struct stats runtime_nsecs_stats;
|
||||
struct stats walltime_nsecs_stats;
|
||||
struct stats runtime_cycles_stats;
|
||||
struct stats runtime_branches_stats;
|
||||
|
||||
#define MATCH_EVENT(t, c, counter) \
|
||||
(attrs[counter].type == PERF_TYPE_##t && \
|
||||
@ -235,6 +238,8 @@ static void read_counter(int counter)
|
||||
update_stats(&runtime_nsecs_stats, count[0]);
|
||||
if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter))
|
||||
update_stats(&runtime_cycles_stats, count[0]);
|
||||
if (MATCH_EVENT(HARDWARE, HW_BRANCH_INSTRUCTIONS, counter))
|
||||
update_stats(&runtime_branches_stats, count[0]);
|
||||
}
|
||||
|
||||
static int run_perf_stat(int argc __used, const char **argv)
|
||||
@ -352,6 +357,14 @@ static void abs_printout(int counter, double avg)
|
||||
ratio = avg / total;
|
||||
|
||||
fprintf(stderr, " # %10.3f IPC ", ratio);
|
||||
} else if (MATCH_EVENT(HARDWARE, HW_BRANCH_MISSES, counter)) {
|
||||
total = avg_stats(&runtime_branches_stats);
|
||||
|
||||
if (total)
|
||||
ratio = avg * 100 / total;
|
||||
|
||||
fprintf(stderr, " # %10.3f %% ", ratio);
|
||||
|
||||
} else {
|
||||
total = avg_stats(&runtime_nsecs_stats);
|
||||
|
||||
|
@ -153,6 +153,17 @@ static struct wake_event *wake_events;
|
||||
|
||||
struct sample_wrapper *all_samples;
|
||||
|
||||
|
||||
struct process_filter;
|
||||
struct process_filter {
|
||||
char *name;
|
||||
int pid;
|
||||
struct process_filter *next;
|
||||
};
|
||||
|
||||
static struct process_filter *process_filter;
|
||||
|
||||
|
||||
static struct per_pid *find_create_pid(int pid)
|
||||
{
|
||||
struct per_pid *cursor = all_data;
|
||||
@ -763,21 +774,42 @@ static void draw_wakeups(void)
|
||||
c = p->all;
|
||||
while (c) {
|
||||
if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
|
||||
if (p->pid == we->waker) {
|
||||
if (p->pid == we->waker && !from) {
|
||||
from = c->Y;
|
||||
task_from = c->comm;
|
||||
task_from = strdup(c->comm);
|
||||
}
|
||||
if (p->pid == we->wakee) {
|
||||
if (p->pid == we->wakee && !to) {
|
||||
to = c->Y;
|
||||
task_to = c->comm;
|
||||
task_to = strdup(c->comm);
|
||||
}
|
||||
}
|
||||
c = c->next;
|
||||
}
|
||||
c = p->all;
|
||||
while (c) {
|
||||
if (p->pid == we->waker && !from) {
|
||||
from = c->Y;
|
||||
task_from = strdup(c->comm);
|
||||
}
|
||||
if (p->pid == we->wakee && !to) {
|
||||
to = c->Y;
|
||||
task_to = strdup(c->comm);
|
||||
}
|
||||
c = c->next;
|
||||
}
|
||||
}
|
||||
p = p->next;
|
||||
}
|
||||
|
||||
if (!task_from) {
|
||||
task_from = malloc(40);
|
||||
sprintf(task_from, "[%i]", we->waker);
|
||||
}
|
||||
if (!task_to) {
|
||||
task_to = malloc(40);
|
||||
sprintf(task_to, "[%i]", we->wakee);
|
||||
}
|
||||
|
||||
if (we->waker == -1)
|
||||
svg_interrupt(we->time, to);
|
||||
else if (from && to && abs(from - to) == 1)
|
||||
@ -785,6 +817,9 @@ static void draw_wakeups(void)
|
||||
else
|
||||
svg_partial_wakeline(we->time, from, task_from, to, task_to);
|
||||
we = we->next;
|
||||
|
||||
free(task_from);
|
||||
free(task_to);
|
||||
}
|
||||
}
|
||||
|
||||
@ -858,12 +893,89 @@ static void draw_process_bars(void)
|
||||
}
|
||||
}

static void add_process_filter(const char *string)
{
    struct process_filter *filt;
    int pid;

    pid = strtoull(string, NULL, 10);
    filt = malloc(sizeof(struct process_filter));
    if (!filt)
        return;

    filt->name = strdup(string);
    filt->pid = pid;
    filt->next = process_filter;

    process_filter = filt;
}

static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
{
    struct process_filter *filt;
    if (!process_filter)
        return 1;

    filt = process_filter;
    while (filt) {
        if (filt->pid && p->pid == filt->pid)
            return 1;
        if (strcmp(filt->name, c->comm) == 0)
            return 1;
        filt = filt->next;
    }
    return 0;
}
|
||||
|
||||
static int determine_display_tasks_filtered(void)
|
||||
{
|
||||
struct per_pid *p;
|
||||
struct per_pidcomm *c;
|
||||
int count = 0;
|
||||
|
||||
p = all_data;
|
||||
while (p) {
|
||||
p->display = 0;
|
||||
if (p->start_time == 1)
|
||||
p->start_time = first_time;
|
||||
|
||||
/* no exit marker, task kept running to the end */
|
||||
if (p->end_time == 0)
|
||||
p->end_time = last_time;
|
||||
|
||||
c = p->all;
|
||||
|
||||
while (c) {
|
||||
c->display = 0;
|
||||
|
||||
if (c->start_time == 1)
|
||||
c->start_time = first_time;
|
||||
|
||||
if (passes_filter(p, c)) {
|
||||
c->display = 1;
|
||||
p->display = 1;
|
||||
count++;
|
||||
}
|
||||
|
||||
if (c->end_time == 0)
|
||||
c->end_time = last_time;
|
||||
|
||||
c = c->next;
|
||||
}
|
||||
p = p->next;
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
static int determine_display_tasks(u64 threshold)
|
||||
{
|
||||
struct per_pid *p;
|
||||
struct per_pidcomm *c;
|
||||
int count = 0;
|
||||
|
||||
if (process_filter)
|
||||
return determine_display_tasks_filtered();
|
||||
|
||||
p = all_data;
|
||||
while (p) {
|
||||
p->display = 0;
|
||||
@ -1050,12 +1162,10 @@ more:
|
||||
size = event->header.size;
|
||||
|
||||
if (!size || process_event(event) < 0) {
|
||||
|
||||
printf("%p [%p]: skipping unknown header type: %d\n",
|
||||
(void *)(offset + head),
|
||||
(void *)(long)(event->header.size),
|
||||
event->header.type);
|
||||
|
||||
pr_warning("%p [%p]: skipping unknown header type: %d\n",
|
||||
(void *)(offset + head),
|
||||
(void *)(long)(event->header.size),
|
||||
event->header.type);
|
||||
/*
|
||||
* assume we lost track of the stream, check alignment, and
|
||||
* increment a single u64 in the hope to catch on again 'soon'.
|
||||
@ -1088,7 +1198,8 @@ done:
|
||||
|
||||
write_svg_file(output_name);
|
||||
|
||||
printf("Written %2.1f seconds of trace to %s.\n", (last_time - first_time) / 1000000000.0, output_name);
|
||||
pr_info("Written %2.1f seconds of trace to %s.\n",
|
||||
(last_time - first_time) / 1000000000.0, output_name);
|
||||
|
||||
return rc;
|
||||
}
|
||||
@ -1129,6 +1240,14 @@ static int __cmd_record(int argc, const char **argv)
|
||||
return cmd_record(i, rec_argv, NULL);
|
||||
}
|
||||
|
||||
static int
|
||||
parse_process(const struct option *opt __used, const char *arg, int __used unset)
|
||||
{
|
||||
if (arg)
|
||||
add_process_filter(arg);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct option options[] = {
|
||||
OPT_STRING('i', "input", &input_name, "file",
|
||||
"input file name"),
|
||||
@ -1136,8 +1255,11 @@ static const struct option options[] = {
|
||||
"output file name"),
|
||||
OPT_INTEGER('w', "width", &svg_page_width,
|
||||
"page width"),
|
||||
OPT_BOOLEAN('p', "power-only", &power_only,
|
||||
OPT_BOOLEAN('P', "power-only", &power_only,
|
||||
"output power data only"),
|
||||
OPT_CALLBACK('p', "process", NULL, "process",
|
||||
"process selector. Pass a pid or process name.",
|
||||
parse_process),
|
||||
OPT_END()
|
||||
};
|
||||
|
||||
|
@ -22,6 +22,7 @@
|
||||
|
||||
#include "util/symbol.h"
|
||||
#include "util/color.h"
|
||||
#include "util/thread.h"
|
||||
#include "util/util.h"
|
||||
#include <linux/rbtree.h>
|
||||
#include "util/parse-options.h"
|
||||
@ -54,26 +55,26 @@
|
||||
|
||||
static int fd[MAX_NR_CPUS][MAX_COUNTERS];
|
||||
|
||||
static int system_wide = 0;
|
||||
static int system_wide = 0;
|
||||
|
||||
static int default_interval = 100000;
|
||||
static int default_interval = 0;
|
||||
|
||||
static int count_filter = 5;
|
||||
static int print_entries = 15;
|
||||
static int count_filter = 5;
|
||||
static int print_entries = 15;
|
||||
|
||||
static int target_pid = -1;
|
||||
static int inherit = 0;
|
||||
static int profile_cpu = -1;
|
||||
static int nr_cpus = 0;
|
||||
static unsigned int realtime_prio = 0;
|
||||
static int group = 0;
|
||||
static int target_pid = -1;
|
||||
static int inherit = 0;
|
||||
static int profile_cpu = -1;
|
||||
static int nr_cpus = 0;
|
||||
static unsigned int realtime_prio = 0;
|
||||
static int group = 0;
|
||||
static unsigned int page_size;
|
||||
static unsigned int mmap_pages = 16;
|
||||
static int freq = 0;
|
||||
static unsigned int mmap_pages = 16;
|
||||
static int freq = 1000; /* 1 KHz */
|
||||
|
||||
static int delay_secs = 2;
|
||||
static int zero;
|
||||
static int dump_symtab;
|
||||
static int delay_secs = 2;
|
||||
static int zero = 0;
|
||||
static int dump_symtab = 0;
|
||||
|
||||
/*
|
||||
* Source
|
||||
@ -86,19 +87,16 @@ struct source_line {
|
||||
struct source_line *next;
|
||||
};
|
||||
|
||||
static char *sym_filter = NULL;
|
||||
struct sym_entry *sym_filter_entry = NULL;
|
||||
static int sym_pcnt_filter = 5;
|
||||
static int sym_counter = 0;
|
||||
static int display_weighted = -1;
|
||||
static char *sym_filter = NULL;
|
||||
struct sym_entry *sym_filter_entry = NULL;
|
||||
static int sym_pcnt_filter = 5;
|
||||
static int sym_counter = 0;
|
||||
static int display_weighted = -1;
|
||||
|
||||
/*
|
||||
* Symbols
|
||||
*/
|
||||
|
||||
static u64 min_ip;
|
||||
static u64 max_ip = -1ll;
|
||||
|
||||
struct sym_entry {
|
||||
struct rb_node rb_node;
|
||||
struct list_head node;
|
||||
@ -106,6 +104,7 @@ struct sym_entry {
|
||||
unsigned long snap_count;
|
||||
double weight;
|
||||
int skip;
|
||||
struct map *map;
|
||||
struct source_line *source;
|
||||
struct source_line *lines;
|
||||
struct source_line **lines_tail;
|
||||
@ -119,12 +118,11 @@ struct sym_entry {
|
||||
static void parse_source(struct sym_entry *syme)
|
||||
{
|
||||
struct symbol *sym;
|
||||
struct module *module;
|
||||
struct section *section = NULL;
|
||||
struct map *map;
|
||||
FILE *file;
|
||||
char command[PATH_MAX*2];
|
||||
const char *path = vmlinux_name;
|
||||
u64 start, end, len;
|
||||
const char *path;
|
||||
u64 len;
|
||||
|
||||
if (!syme)
|
||||
return;
|
||||
@ -135,27 +133,16 @@ static void parse_source(struct sym_entry *syme)
|
||||
}
|
||||
|
||||
sym = (struct symbol *)(syme + 1);
|
||||
module = sym->module;
|
||||
map = syme->map;
|
||||
path = map->dso->long_name;
|
||||
|
||||
if (module)
|
||||
path = module->path;
|
||||
if (!path)
|
||||
return;
|
||||
|
||||
start = sym->obj_start;
|
||||
if (!start)
|
||||
start = sym->start;
|
||||
|
||||
if (module) {
|
||||
section = module->sections->find_section(module->sections, ".text");
|
||||
if (section)
|
||||
start -= section->vma;
|
||||
}
|
||||
|
||||
end = start + sym->end - sym->start + 1;
|
||||
len = sym->end - sym->start;
|
||||
|
||||
sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s", start, end, path);
|
||||
sprintf(command,
|
||||
"objdump --start-address=0x%016Lx "
|
||||
"--stop-address=0x%016Lx -dS %s",
|
||||
map->unmap_ip(map, sym->start),
|
||||
map->unmap_ip(map, sym->end), path);
|
||||
|
||||
file = popen(command, "r");
|
||||
if (!file)
|
||||
@ -187,13 +174,11 @@ static void parse_source(struct sym_entry *syme)
|
||||
|
||||
if (strlen(src->line)>8 && src->line[8] == ':') {
|
||||
src->eip = strtoull(src->line, NULL, 16);
|
||||
if (section)
|
||||
src->eip += section->vma;
|
||||
src->eip = map->unmap_ip(map, src->eip);
|
||||
}
|
||||
if (strlen(src->line)>8 && src->line[16] == ':') {
|
||||
src->eip = strtoull(src->line, NULL, 16);
|
||||
if (section)
|
||||
src->eip += section->vma;
|
||||
src->eip = map->unmap_ip(map, src->eip);
|
||||
}
|
||||
}
|
||||
pclose(file);
|
||||
@ -245,16 +230,9 @@ static void lookup_sym_source(struct sym_entry *syme)
|
||||
struct symbol *symbol = (struct symbol *)(syme + 1);
|
||||
struct source_line *line;
|
||||
char pattern[PATH_MAX];
|
||||
char *idx;
|
||||
|
||||
sprintf(pattern, "<%s>:", symbol->name);
|
||||
|
||||
if (symbol->module) {
|
||||
idx = strstr(pattern, "\t");
|
||||
if (idx)
|
||||
*idx = 0;
|
||||
}
|
||||
|
||||
pthread_mutex_lock(&syme->source_lock);
|
||||
for (line = syme->lines; line; line = line->next) {
|
||||
if (strstr(line->line, pattern)) {
|
||||
@ -516,8 +494,8 @@ static void print_sym_table(void)
|
||||
if (verbose)
|
||||
printf(" - %016llx", sym->start);
|
||||
printf(" : %s", sym->name);
|
||||
if (sym->module)
|
||||
printf("\t[%s]", sym->module->name);
|
||||
if (syme->map->dso->name[0] == '[')
|
||||
printf(" \t%s", syme->map->dso->name);
|
||||
printf("\n");
|
||||
}
|
||||
}
|
||||
@ -686,6 +664,8 @@ static void handle_keypress(int c)
|
||||
switch (c) {
|
||||
case 'd':
|
||||
prompt_integer(&delay_secs, "Enter display delay");
|
||||
if (delay_secs < 1)
|
||||
delay_secs = 1;
|
||||
break;
|
||||
case 'e':
|
||||
prompt_integer(&print_entries, "Enter display entries (lines)");
|
||||
@ -788,7 +768,7 @@ static const char *skip_symbols[] = {
|
||||
NULL
|
||||
};
|
||||
|
||||
static int symbol_filter(struct dso *self, struct symbol *sym)
|
||||
static int symbol_filter(struct map *map, struct symbol *sym)
|
||||
{
|
||||
struct sym_entry *syme;
|
||||
const char *name = sym->name;
|
||||
@ -810,7 +790,8 @@ static int symbol_filter(struct dso *self, struct symbol *sym)
|
||||
strstr(name, "_text_end"))
|
||||
return 1;
|
||||
|
||||
syme = dso__sym_priv(self, sym);
|
||||
syme = dso__sym_priv(map->dso, sym);
|
||||
syme->map = map;
|
||||
pthread_mutex_init(&syme->source_lock, NULL);
|
||||
if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter))
|
||||
sym_filter_entry = syme;
|
||||
@ -827,34 +808,14 @@ static int symbol_filter(struct dso *self, struct symbol *sym)
|
||||
|
||||
static int parse_symbols(void)
|
||||
{
|
||||
struct rb_node *node;
|
||||
struct symbol *sym;
|
||||
int use_modules = vmlinux_name ? 1 : 0;
|
||||
|
||||
kernel_dso = dso__new("[kernel]", sizeof(struct sym_entry));
|
||||
if (kernel_dso == NULL)
|
||||
if (dsos__load_kernel(vmlinux_name, sizeof(struct sym_entry),
|
||||
symbol_filter, 1) <= 0)
|
||||
return -1;
|
||||
|
||||
if (dso__load_kernel(kernel_dso, vmlinux_name, symbol_filter, verbose, use_modules) <= 0)
|
||||
goto out_delete_dso;
|
||||
|
||||
node = rb_first(&kernel_dso->syms);
|
||||
sym = rb_entry(node, struct symbol, rb_node);
|
||||
min_ip = sym->start;
|
||||
|
||||
node = rb_last(&kernel_dso->syms);
|
||||
sym = rb_entry(node, struct symbol, rb_node);
|
||||
max_ip = sym->end;
|
||||
|
||||
if (dump_symtab)
|
||||
dso__fprintf(kernel_dso, stderr);
|
||||
dsos__fprintf(stderr);
|
||||
|
||||
return 0;
|
||||
|
||||
out_delete_dso:
|
||||
dso__delete(kernel_dso);
|
||||
kernel_dso = NULL;
|
||||
return -1;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -862,10 +823,11 @@ out_delete_dso:
|
||||
*/
|
||||
static void record_ip(u64 ip, int counter)
|
||||
{
|
||||
struct symbol *sym = dso__find_symbol(kernel_dso, ip);
|
||||
struct map *map;
|
||||
struct symbol *sym = kernel_maps__find_symbol(ip, &map);
|
||||
|
||||
if (sym != NULL) {
|
||||
struct sym_entry *syme = dso__sym_priv(kernel_dso, sym);
|
||||
struct sym_entry *syme = dso__sym_priv(map->dso, sym);
|
||||
|
||||
if (!syme->skip) {
|
||||
syme->count[counter]++;
|
||||
@ -911,8 +873,6 @@ static unsigned int mmap_read_head(struct mmap_data *md)
|
||||
return head;
|
||||
}
|
||||
|
||||
struct timeval last_read, this_read;
|
||||
|
||||
static void mmap_read_counter(struct mmap_data *md)
|
||||
{
|
||||
unsigned int head = mmap_read_head(md);
|
||||
@ -920,8 +880,6 @@ static void mmap_read_counter(struct mmap_data *md)
|
||||
unsigned char *data = md->base + page_size;
|
||||
int diff;
|
||||
|
||||
gettimeofday(&this_read, NULL);
|
||||
|
||||
/*
|
||||
* If we're further behind than half the buffer, there's a chance
|
||||
* the writer will bite our tail and mess up the samples under us.
|
||||
@ -932,14 +890,7 @@ static void mmap_read_counter(struct mmap_data *md)
|
||||
*/
|
||||
diff = head - old;
|
||||
if (diff > md->mask / 2 || diff < 0) {
|
||||
struct timeval iv;
|
||||
unsigned long msecs;
|
||||
|
||||
timersub(&this_read, &last_read, &iv);
|
||||
msecs = iv.tv_sec*1000 + iv.tv_usec/1000;
|
||||
|
||||
fprintf(stderr, "WARNING: failed to keep up with mmap data."
|
||||
" Last read %lu msecs ago.\n", msecs);
|
||||
fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
|
||||
|
||||
/*
|
||||
* head points to a known good entry, start there.
|
||||
@ -947,8 +898,6 @@ static void mmap_read_counter(struct mmap_data *md)
|
||||
old = head;
|
||||
}
|
||||
|
||||
last_read = this_read;
|
||||
|
||||
for (; old != head;) {
|
||||
event_t *event = (event_t *)&data[old & md->mask];
|
||||
|
||||
@ -1016,7 +965,13 @@ static void start_counter(int i, int counter)
|
||||
attr = attrs + counter;
|
||||
|
||||
attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
|
||||
attr->freq = freq;
|
||||
|
||||
if (freq) {
|
||||
attr->sample_type |= PERF_SAMPLE_PERIOD;
|
||||
attr->freq = 1;
|
||||
attr->sample_freq = freq;
|
||||
}
|
||||
|
||||
attr->inherit = (cpu < 0) && inherit;
|
||||
|
||||
try_again:
|
||||
@ -1171,11 +1126,6 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
|
||||
if (argc)
|
||||
usage_with_options(top_usage, options);
|
||||
|
||||
if (freq) {
|
||||
default_interval = freq;
|
||||
freq = 1;
|
||||
}
|
||||
|
||||
/* CPU and PID are mutually exclusive */
|
||||
if (target_pid != -1 && profile_cpu != -1) {
|
||||
printf("WARNING: PID switch overriding CPU\n");
|
||||
@ -1192,6 +1142,19 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
|
||||
parse_symbols();
|
||||
parse_source(sym_filter_entry);
|
||||
|
||||
|
||||
/*
|
||||
* User specified count overrides default frequency.
|
||||
*/
|
||||
if (default_interval)
|
||||
freq = 0;
|
||||
else if (freq) {
|
||||
default_interval = freq;
|
||||
} else {
|
||||
fprintf(stderr, "frequency and count are zero, aborting\n");
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
/*
|
||||
* Fill in the ones not specifically initialized via -c:
|
||||
*/
|
||||
|
@ -12,28 +12,24 @@
|
||||
#include "util/debug.h"
|
||||
|
||||
#include "util/trace-event.h"
|
||||
#include "util/data_map.h"
|
||||
|
||||
static char const *input_name = "perf.data";
|
||||
static int input;
|
||||
static unsigned long page_size;
|
||||
static unsigned long mmap_window = 32;
|
||||
|
||||
static unsigned long total = 0;
|
||||
static unsigned long total_comm = 0;
|
||||
|
||||
static struct rb_root threads;
|
||||
static struct thread *last_match;
|
||||
|
||||
static struct perf_header *header;
|
||||
static u64 sample_type;
|
||||
|
||||
static char *cwd;
|
||||
static int cwdlen;
|
||||
|
||||
|
||||
static int
|
||||
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
|
||||
{
|
||||
struct thread *thread;
|
||||
|
||||
thread = threads__findnew(event->comm.pid, &threads, &last_match);
|
||||
struct thread *thread = threads__findnew(event->comm.pid);
|
||||
|
||||
dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n",
|
||||
(void *)(offset + head),
|
||||
@ -53,18 +49,12 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
|
||||
static int
|
||||
process_sample_event(event_t *event, unsigned long offset, unsigned long head)
|
||||
{
|
||||
char level;
|
||||
int show = 0;
|
||||
struct dso *dso = NULL;
|
||||
struct thread *thread;
|
||||
u64 ip = event->ip.ip;
|
||||
u64 timestamp = -1;
|
||||
u32 cpu = -1;
|
||||
u64 period = 1;
|
||||
void *more_data = event->ip.__more_data;
|
||||
int cpumode;
|
||||
|
||||
thread = threads__findnew(event->ip.pid, &threads, &last_match);
|
||||
struct thread *thread = threads__findnew(event->ip.pid);
|
||||
|
||||
if (sample_type & PERF_SAMPLE_TIME) {
|
||||
timestamp = *(u64 *)more_data;
|
||||
@ -90,37 +80,13 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
|
||||
(void *)(long)ip,
|
||||
(long long)period);
|
||||
|
||||
dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
|
||||
|
||||
if (thread == NULL) {
|
||||
eprintf("problem processing %d event, skipping it.\n",
|
||||
event->header.type);
|
||||
pr_debug("problem processing %d event, skipping it.\n",
|
||||
event->header.type);
|
||||
return -1;
|
||||
}
|
||||
|
||||
cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
|
||||
|
||||
if (cpumode == PERF_RECORD_MISC_KERNEL) {
|
||||
show = SHOW_KERNEL;
|
||||
level = 'k';
|
||||
|
||||
dso = kernel_dso;
|
||||
|
||||
dump_printf(" ...... dso: %s\n", dso->name);
|
||||
|
||||
} else if (cpumode == PERF_RECORD_MISC_USER) {
|
||||
|
||||
show = SHOW_USER;
|
||||
level = '.';
|
||||
|
||||
} else {
|
||||
show = SHOW_HV;
|
||||
level = 'H';
|
||||
|
||||
dso = hypervisor_dso;
|
||||
|
||||
dump_printf(" ...... dso: [hypervisor]\n");
|
||||
}
|
||||
dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
|
||||
|
||||
if (sample_type & PERF_SAMPLE_RAW) {
|
||||
struct {
|
||||
@ -140,121 +106,32 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
process_event(event_t *event, unsigned long offset, unsigned long head)
|
||||
static int sample_type_check(u64 type)
|
||||
{
|
||||
trace_event(event);
|
||||
sample_type = type;
|
||||
|
||||
switch (event->header.type) {
|
||||
case PERF_RECORD_MMAP ... PERF_RECORD_LOST:
|
||||
return 0;
|
||||
|
||||
case PERF_RECORD_COMM:
|
||||
return process_comm_event(event, offset, head);
|
||||
|
||||
case PERF_RECORD_EXIT ... PERF_RECORD_READ:
|
||||
return 0;
|
||||
|
||||
case PERF_RECORD_SAMPLE:
|
||||
return process_sample_event(event, offset, head);
|
||||
|
||||
case PERF_RECORD_MAX:
|
||||
default:
|
||||
if (!(sample_type & PERF_SAMPLE_RAW)) {
|
||||
fprintf(stderr,
|
||||
"No trace sample to read. Did you call perf record "
|
||||
"without -R?");
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct perf_file_handler file_handler = {
|
||||
.process_sample_event = process_sample_event,
|
||||
.process_comm_event = process_comm_event,
|
||||
.sample_type_check = sample_type_check,
|
||||
};
|
||||
|
||||
static int __cmd_trace(void)
|
||||
{
|
||||
int ret, rc = EXIT_FAILURE;
|
||||
unsigned long offset = 0;
|
||||
unsigned long head = 0;
|
||||
struct stat perf_stat;
|
||||
event_t *event;
|
||||
uint32_t size;
|
||||
char *buf;
|
||||
register_idle_thread();
|
||||
register_perf_file_handler(&file_handler);
|
||||
|
||||
trace_report();
|
||||
register_idle_thread(&threads, &last_match);
|
||||
|
||||
input = open(input_name, O_RDONLY);
|
||||
if (input < 0) {
|
||||
perror("failed to open file");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
ret = fstat(input, &perf_stat);
|
||||
if (ret < 0) {
|
||||
perror("failed to stat file");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
if (!perf_stat.st_size) {
|
||||
fprintf(stderr, "zero-sized file, nothing to do!\n");
|
||||
exit(0);
|
||||
}
|
||||
header = perf_header__read(input);
|
||||
head = header->data_offset;
|
||||
sample_type = perf_header__sample_type(header);
|
||||
|
||||
if (!(sample_type & PERF_SAMPLE_RAW))
|
||||
die("No trace sample to read. Did you call perf record "
|
||||
"without -R?");
|
||||
|
||||
if (load_kernel() < 0) {
|
||||
perror("failed to load kernel symbols");
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
||||
remap:
|
||||
buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
|
||||
MAP_SHARED, input, offset);
|
||||
if (buf == MAP_FAILED) {
|
||||
perror("failed to mmap file");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
more:
|
||||
event = (event_t *)(buf + head);
|
||||
|
||||
if (head + event->header.size >= page_size * mmap_window) {
|
||||
unsigned long shift = page_size * (head / page_size);
|
||||
int res;
|
||||
|
||||
res = munmap(buf, page_size * mmap_window);
|
||||
assert(res == 0);
|
||||
|
||||
offset += shift;
|
||||
head -= shift;
|
||||
goto remap;
|
||||
}
|
||||
|
||||
size = event->header.size;
|
||||
|
||||
if (!size || process_event(event, offset, head) < 0) {
|
||||
|
||||
/*
|
||||
* assume we lost track of the stream, check alignment, and
|
||||
* increment a single u64 in the hope to catch on again 'soon'.
|
||||
*/
|
||||
|
||||
if (unlikely(head & 7))
|
||||
head &= ~7ULL;
|
||||
|
||||
size = 8;
|
||||
}
|
||||
|
||||
head += size;
|
||||
|
||||
if (offset + head < (unsigned long)perf_stat.st_size)
|
||||
goto more;
|
||||
|
||||
rc = EXIT_SUCCESS;
|
||||
close(input);
|
||||
|
||||
return rc;
|
||||
return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd);
|
||||
}
|
||||
|
||||
static const char * const annotate_usage[] = {
|
||||
@ -267,13 +144,14 @@ static const struct option options[] = {
|
||||
"dump raw trace in ASCII"),
|
||||
OPT_BOOLEAN('v', "verbose", &verbose,
|
||||
"be more verbose (show symbol address, etc)"),
|
||||
OPT_BOOLEAN('l', "latency", &latency_format,
|
||||
"show latency attributes (irqs/preemption disabled, etc)"),
|
||||
OPT_END()
|
||||
};
|
||||
|
||||
int cmd_trace(int argc, const char **argv, const char *prefix __used)
|
||||
{
|
||||
symbol__init();
|
||||
page_size = getpagesize();
|
||||
|
||||
argc = parse_options(argc, argv, options, annotate_usage, 0);
|
||||
if (argc) {
|
||||
|
@ -89,8 +89,8 @@ static int handle_options(const char*** argv, int* argc, int* envchanged)
|
||||
/*
|
||||
* Check remaining flags.
|
||||
*/
|
||||
if (!prefixcmp(cmd, "--exec-path")) {
|
||||
cmd += 11;
|
||||
if (!prefixcmp(cmd, CMD_EXEC_PATH)) {
|
||||
cmd += strlen(CMD_EXEC_PATH);
|
||||
if (*cmd == '=')
|
||||
perf_set_argv_exec_path(cmd + 1);
|
||||
else {
|
||||
@ -117,8 +117,8 @@ static int handle_options(const char*** argv, int* argc, int* envchanged)
|
||||
(*argv)++;
|
||||
(*argc)--;
|
||||
handled++;
|
||||
} else if (!prefixcmp(cmd, "--perf-dir=")) {
|
||||
setenv(PERF_DIR_ENVIRONMENT, cmd + 10, 1);
|
||||
} else if (!prefixcmp(cmd, CMD_PERF_DIR)) {
|
||||
setenv(PERF_DIR_ENVIRONMENT, cmd + strlen(CMD_PERF_DIR), 1);
|
||||
if (envchanged)
|
||||
*envchanged = 1;
|
||||
} else if (!strcmp(cmd, "--work-tree")) {
|
||||
@ -131,8 +131,8 @@ static int handle_options(const char*** argv, int* argc, int* envchanged)
|
||||
*envchanged = 1;
|
||||
(*argv)++;
|
||||
(*argc)--;
|
||||
} else if (!prefixcmp(cmd, "--work-tree=")) {
|
||||
setenv(PERF_WORK_TREE_ENVIRONMENT, cmd + 12, 1);
|
||||
} else if (!prefixcmp(cmd, CMD_WORK_TREE)) {
|
||||
setenv(PERF_WORK_TREE_ENVIRONMENT, cmd + strlen(CMD_WORK_TREE), 1);
|
||||
if (envchanged)
|
||||
*envchanged = 1;
|
||||
} else if (!strcmp(cmd, "--debugfs-dir")) {
|
||||
@ -146,8 +146,8 @@ static int handle_options(const char*** argv, int* argc, int* envchanged)
|
||||
*envchanged = 1;
|
||||
(*argv)++;
|
||||
(*argc)--;
|
||||
} else if (!prefixcmp(cmd, "--debugfs-dir=")) {
|
||||
strncpy(debugfs_mntpt, cmd + 14, MAXPATHLEN);
|
||||
} else if (!prefixcmp(cmd, CMD_DEBUGFS_DIR)) {
|
||||
strncpy(debugfs_mntpt, cmd + strlen(CMD_DEBUGFS_DIR), MAXPATHLEN);
|
||||
debugfs_mntpt[MAXPATHLEN - 1] = '\0';
|
||||
if (envchanged)
|
||||
*envchanged = 1;
|
||||
|
@ -1,7 +1,7 @@
|
||||
#!/bin/sh
|
||||
|
||||
GVF=PERF-VERSION-FILE
|
||||
DEF_VER=v0.0.1.PERF
|
||||
DEF_VER=v0.0.2.PERF
|
||||
|
||||
LF='
|
||||
'
|
||||
|
@ -1,10 +1,15 @@
|
||||
#ifndef CACHE_H
|
||||
#define CACHE_H
|
||||
#ifndef __PERF_CACHE_H
|
||||
#define __PERF_CACHE_H
|
||||
|
||||
#include "util.h"
|
||||
#include "strbuf.h"
|
||||
#include "../perf.h"
|
||||
|
||||
#define CMD_EXEC_PATH "--exec-path"
|
||||
#define CMD_PERF_DIR "--perf-dir="
|
||||
#define CMD_WORK_TREE "--work-tree="
|
||||
#define CMD_DEBUGFS_DIR "--debugfs-dir="
|
||||
|
||||
#define PERF_DIR_ENVIRONMENT "PERF_DIR"
|
||||
#define PERF_WORK_TREE_ENVIRONMENT "PERF_WORK_TREE"
|
||||
#define DEFAULT_PERF_DIR_ENVIRONMENT ".perf"
|
||||
@ -117,4 +122,4 @@ extern char *perf_pathdup(const char *fmt, ...)
|
||||
|
||||
extern size_t strlcpy(char *dest, const char *src, size_t size);
|
||||
|
||||
#endif /* CACHE_H */
|
||||
#endif /* __PERF_CACHE_H */
|
||||
|
@ -206,7 +206,7 @@ fill_node(struct callchain_node *node, struct ip_callchain *chain,
|
||||
}
|
||||
node->val_nr = chain->nr - start;
|
||||
if (!node->val_nr)
|
||||
printf("Warning: empty node in callchain tree\n");
|
||||
pr_warning("Warning: empty node in callchain tree\n");
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -58,4 +58,4 @@ static inline u64 cumul_hits(struct callchain_node *node)
|
||||
int register_callchain_param(struct callchain_param *param);
|
||||
void append_chain(struct callchain_node *root, struct ip_callchain *chain,
|
||||
struct symbol **syms);
|
||||
#endif
|
||||
#endif /* __PERF_CALLCHAIN_H */
|
||||
|
@ -1,5 +1,5 @@
|
||||
#ifndef COLOR_H
|
||||
#define COLOR_H
|
||||
#ifndef __PERF_COLOR_H
|
||||
#define __PERF_COLOR_H
|
||||
|
||||
/* "\033[1;38;5;2xx;48;5;2xxm\0" is 23 bytes */
|
||||
#define COLOR_MAXLEN 24
|
||||
@ -39,4 +39,4 @@ int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *bu
|
||||
int percent_color_fprintf(FILE *fp, const char *fmt, double percent);
|
||||
const char *get_percent_color(double percent);
|
||||
|
||||
#endif /* COLOR_H */
|
||||
#endif /* __PERF_COLOR_H */
|
||||
|
222
tools/perf/util/data_map.c
Normal file
@ -0,0 +1,222 @@
|
||||
#include "data_map.h"
|
||||
#include "symbol.h"
|
||||
#include "util.h"
|
||||
#include "debug.h"
|
||||
|
||||
|
||||
static struct perf_file_handler *curr_handler;
|
||||
static unsigned long mmap_window = 32;
|
||||
static char __cwd[PATH_MAX];
|
||||
|
||||
static int
|
||||
process_event_stub(event_t *event __used,
|
||||
unsigned long offset __used,
|
||||
unsigned long head __used)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
void register_perf_file_handler(struct perf_file_handler *handler)
|
||||
{
|
||||
if (!handler->process_sample_event)
|
||||
handler->process_sample_event = process_event_stub;
|
||||
if (!handler->process_mmap_event)
|
||||
handler->process_mmap_event = process_event_stub;
|
||||
if (!handler->process_comm_event)
|
||||
handler->process_comm_event = process_event_stub;
|
||||
if (!handler->process_fork_event)
|
||||
handler->process_fork_event = process_event_stub;
|
||||
if (!handler->process_exit_event)
|
||||
handler->process_exit_event = process_event_stub;
|
||||
if (!handler->process_lost_event)
|
||||
handler->process_lost_event = process_event_stub;
|
||||
if (!handler->process_read_event)
|
||||
handler->process_read_event = process_event_stub;
|
||||
if (!handler->process_throttle_event)
|
||||
handler->process_throttle_event = process_event_stub;
|
||||
if (!handler->process_unthrottle_event)
|
||||
handler->process_unthrottle_event = process_event_stub;
|
||||
|
||||
curr_handler = handler;
|
||||
}
|
||||
|
||||
static int
|
||||
process_event(event_t *event, unsigned long offset, unsigned long head)
|
||||
{
|
||||
trace_event(event);
|
||||
|
||||
switch (event->header.type) {
|
||||
case PERF_RECORD_SAMPLE:
|
||||
return curr_handler->process_sample_event(event, offset, head);
|
||||
case PERF_RECORD_MMAP:
|
||||
return curr_handler->process_mmap_event(event, offset, head);
|
||||
case PERF_RECORD_COMM:
|
||||
return curr_handler->process_comm_event(event, offset, head);
|
||||
case PERF_RECORD_FORK:
|
||||
return curr_handler->process_fork_event(event, offset, head);
|
||||
case PERF_RECORD_EXIT:
|
||||
return curr_handler->process_exit_event(event, offset, head);
|
||||
case PERF_RECORD_LOST:
|
||||
return curr_handler->process_lost_event(event, offset, head);
|
||||
case PERF_RECORD_READ:
|
||||
return curr_handler->process_read_event(event, offset, head);
|
||||
case PERF_RECORD_THROTTLE:
|
||||
return curr_handler->process_throttle_event(event, offset, head);
|
||||
case PERF_RECORD_UNTHROTTLE:
|
||||
return curr_handler->process_unthrottle_event(event, offset, head);
|
||||
default:
|
||||
curr_handler->total_unknown++;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
int mmap_dispatch_perf_file(struct perf_header **pheader,
|
||||
const char *input_name,
|
||||
int force,
|
||||
int full_paths,
|
||||
int *cwdlen,
|
||||
char **cwd)
|
||||
{
|
||||
int ret, rc = EXIT_FAILURE;
|
||||
struct perf_header *header;
|
||||
unsigned long head, shift;
|
||||
unsigned long offset = 0;
|
||||
struct stat input_stat;
|
||||
size_t page_size;
|
||||
u64 sample_type;
|
||||
event_t *event;
|
||||
uint32_t size;
|
||||
int input;
|
||||
char *buf;
|
||||
|
||||
if (!curr_handler)
|
||||
die("Forgot to register perf file handler");
|
||||
|
||||
page_size = getpagesize();
|
||||
|
||||
input = open(input_name, O_RDONLY);
|
||||
if (input < 0) {
|
||||
fprintf(stderr, " failed to open file: %s", input_name);
|
||||
if (!strcmp(input_name, "perf.data"))
|
||||
fprintf(stderr, " (try 'perf record' first)");
|
||||
fprintf(stderr, "\n");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
ret = fstat(input, &input_stat);
|
||||
if (ret < 0) {
|
||||
perror("failed to stat file");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
|
||||
fprintf(stderr, "file: %s not owned by current user or root\n",
|
||||
input_name);
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
if (!input_stat.st_size) {
|
||||
fprintf(stderr, "zero-sized file, nothing to do!\n");
|
||||
exit(0);
|
||||
}
|
||||
|
||||
*pheader = perf_header__read(input);
|
||||
header = *pheader;
|
||||
head = header->data_offset;
|
||||
|
||||
sample_type = perf_header__sample_type(header);
|
||||
|
||||
if (curr_handler->sample_type_check)
|
||||
if (curr_handler->sample_type_check(sample_type) < 0)
|
||||
exit(-1);
|
||||
|
||||
if (load_kernel(0, NULL) < 0) {
|
||||
perror("failed to load kernel symbols");
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
||||
if (!full_paths) {
|
||||
if (getcwd(__cwd, sizeof(__cwd)) == NULL) {
|
||||
perror("failed to get the current directory");
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
*cwd = __cwd;
|
||||
*cwdlen = strlen(*cwd);
|
||||
} else {
|
||||
*cwd = NULL;
|
||||
*cwdlen = 0;
|
||||
}
|
||||
|
||||
shift = page_size * (head / page_size);
|
||||
offset += shift;
|
||||
head -= shift;
|
||||
|
||||
remap:
|
||||
buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
|
||||
MAP_SHARED, input, offset);
|
||||
if (buf == MAP_FAILED) {
|
||||
perror("failed to mmap file");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
more:
|
||||
event = (event_t *)(buf + head);
|
||||
|
||||
size = event->header.size;
|
||||
if (!size)
|
||||
size = 8;
|
||||
|
||||
if (head + event->header.size >= page_size * mmap_window) {
|
||||
int munmap_ret;
|
||||
|
||||
shift = page_size * (head / page_size);
|
||||
|
||||
munmap_ret = munmap(buf, page_size * mmap_window);
|
||||
assert(munmap_ret == 0);
|
||||
|
||||
offset += shift;
|
||||
head -= shift;
|
||||
goto remap;
|
||||
}
|
||||
|
||||
size = event->header.size;
|
||||
|
||||
dump_printf("\n%p [%p]: event: %d\n",
|
||||
(void *)(offset + head),
|
||||
(void *)(long)event->header.size,
|
||||
event->header.type);
|
||||
|
||||
if (!size || process_event(event, offset, head) < 0) {
|
||||
|
||||
dump_printf("%p [%p]: skipping unknown header type: %d\n",
|
||||
(void *)(offset + head),
|
||||
(void *)(long)(event->header.size),
|
||||
event->header.type);
|
||||
|
||||
/*
|
||||
* assume we lost track of the stream, check alignment, and
|
||||
* increment a single u64 in the hope to catch on again 'soon'.
|
||||
*/
|
||||
|
||||
if (unlikely(head & 7))
|
||||
head &= ~7ULL;
|
||||
|
||||
size = 8;
|
||||
}
|
||||
|
||||
head += size;
|
||||
|
||||
if (offset + head >= header->data_offset + header->data_size)
|
||||
goto done;
|
||||
|
||||
if (offset + head < (unsigned long)input_stat.st_size)
|
||||
goto more;
|
||||
|
||||
done:
|
||||
rc = EXIT_SUCCESS;
|
||||
close(input);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
31
tools/perf/util/data_map.h
Normal file
@ -0,0 +1,31 @@
|
||||
#ifndef __PERF_DATAMAP_H
|
||||
#define __PERF_DATAMAP_H
|
||||
|
||||
#include "event.h"
|
||||
#include "header.h"
|
||||
|
||||
typedef int (*event_type_handler_t)(event_t *, unsigned long, unsigned long);
|
||||
|
||||
struct perf_file_handler {
|
||||
event_type_handler_t process_sample_event;
|
||||
event_type_handler_t process_mmap_event;
|
||||
event_type_handler_t process_comm_event;
|
||||
event_type_handler_t process_fork_event;
|
||||
event_type_handler_t process_exit_event;
|
||||
event_type_handler_t process_lost_event;
|
||||
event_type_handler_t process_read_event;
|
||||
event_type_handler_t process_throttle_event;
|
||||
event_type_handler_t process_unthrottle_event;
|
||||
int (*sample_type_check)(u64 sample_type);
|
||||
unsigned long total_unknown;
|
||||
};
|
||||
|
||||
void register_perf_file_handler(struct perf_file_handler *handler);
|
||||
int mmap_dispatch_perf_file(struct perf_header **pheader,
|
||||
const char *input_name,
|
||||
int force,
|
||||
int full_paths,
|
||||
int *cwdlen,
|
||||
char **cwd);
|
||||
|
||||
#endif
|
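For orientation, here is a minimal sketch (not part of this commit) of how a builtin is meant to drive the data_map API declared above; the callback body and error handling are illustrative only, while the struct, register_perf_file_handler() and mmap_dispatch_perf_file() signatures are the ones from data_map.h:

/* Sketch only: feeding perf.data through the new dispatcher. */
#include "util/data_map.h"

static int handle_sample(event_t *event, unsigned long offset,
                         unsigned long head)
{
        /* per-sample work would go here */
        (void)event; (void)offset; (void)head;
        return 0;
}

static struct perf_file_handler handler = {
        .process_sample_event = handle_sample,
        /* unset callbacks fall back to the internal stub */
};

static int run(const char *input_name)
{
        struct perf_header *header;
        int cwdlen;
        char *cwd;

        register_perf_file_handler(&handler);
        /* force = 0, full_paths = 0; cwd/cwdlen come back for path shortening */
        return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd);
}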
@ -13,12 +13,12 @@
|
||||
int verbose = 0;
|
||||
int dump_trace = 0;
|
||||
|
||||
int eprintf(const char *fmt, ...)
|
||||
int eprintf(int level, const char *fmt, ...)
|
||||
{
|
||||
va_list args;
|
||||
int ret = 0;
|
||||
|
||||
if (verbose) {
|
||||
if (verbose >= level) {
|
||||
va_start(args, fmt);
|
||||
ret = vfprintf(stderr, fmt, args);
|
||||
va_end(args);
|
||||
|
@ -1,8 +1,13 @@
|
||||
/* For debugging general purposes */
|
||||
#ifndef __PERF_DEBUG_H
|
||||
#define __PERF_DEBUG_H
|
||||
|
||||
extern int verbose;
|
||||
extern int dump_trace;
|
||||
|
||||
int eprintf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
|
||||
int eprintf(int level,
|
||||
const char *fmt, ...) __attribute__((format(printf, 2, 3)));
|
||||
int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
|
||||
void trace_event(event_t *event);
|
||||
|
||||
#endif /* __PERF_DEBUG_H */
|
||||
|
@ -1,14 +1,10 @@
|
||||
#ifndef __PERF_RECORD_H
|
||||
#define __PERF_RECORD_H
|
||||
|
||||
#include "../perf.h"
|
||||
#include "util.h"
|
||||
#include <linux/list.h>
|
||||
|
||||
enum {
|
||||
SHOW_KERNEL = 1,
|
||||
SHOW_USER = 2,
|
||||
SHOW_HV = 4,
|
||||
};
|
||||
#include <linux/rbtree.h>
|
||||
|
||||
/*
|
||||
* PERF_SAMPLE_IP | PERF_SAMPLE_TID | *
|
||||
@ -78,11 +74,15 @@ typedef union event_union {
|
||||
} event_t;
|
||||
|
||||
struct map {
|
||||
struct list_head node;
|
||||
union {
|
||||
struct rb_node rb_node;
|
||||
struct list_head node;
|
||||
};
|
||||
u64 start;
|
||||
u64 end;
|
||||
u64 pgoff;
|
||||
u64 (*map_ip)(struct map *, u64);
|
||||
u64 (*unmap_ip)(struct map *, u64);
|
||||
struct dso *dso;
|
||||
};
|
||||
|
||||
@ -91,14 +91,24 @@ static inline u64 map__map_ip(struct map *map, u64 ip)
|
||||
return ip - map->start + map->pgoff;
|
||||
}
|
||||
|
||||
static inline u64 vdso__map_ip(struct map *map __used, u64 ip)
|
||||
static inline u64 map__unmap_ip(struct map *map, u64 ip)
|
||||
{
|
||||
return ip + map->start - map->pgoff;
|
||||
}
|
||||
|
||||
static inline u64 identity__map_ip(struct map *map __used, u64 ip)
|
||||
{
|
||||
return ip;
|
||||
}
|
||||
|
||||
struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen);
|
||||
struct symbol;
|
||||
|
||||
typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
|
||||
|
||||
struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen,
|
||||
unsigned int sym_priv_size, symbol_filter_t filter);
|
||||
struct map *map__clone(struct map *self);
|
||||
int map__overlap(struct map *l, struct map *r);
|
||||
size_t map__fprintf(struct map *self, FILE *fp);
|
||||
|
||||
#endif
|
||||
#endif /* __PERF_RECORD_H */
|
||||
|
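As a quick illustration of the new map__map_ip()/map__unmap_ip() pair above (which replaces the old vdso__map_ip() special case), here is a round-trip sketch with made-up addresses; it is not code from this commit and the include path is assumed:

/* Sketch only: translating between runtime addresses and dso-relative ones. */
#include "util/event.h"

static void map_ip_example(void)
{
        struct map m = {
                .start    = 0x400000,   /* where the dso is mapped */
                .end      = 0x500000,
                .pgoff    = 0x1000,     /* file offset of the mapping */
                .map_ip   = map__map_ip,
                .unmap_ip = map__unmap_ip,
        };
        u64 rip  = 0x401234;            /* sampled instruction pointer */
        u64 off  = m.map_ip(&m, rip);   /* 0x2234: dso-relative address */
        u64 back = m.unmap_ip(&m, off); /* 0x401234 again */

        (void)back;
}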
@ -1,5 +1,5 @@
|
||||
#ifndef PERF_EXEC_CMD_H
|
||||
#define PERF_EXEC_CMD_H
|
||||
#ifndef __PERF_EXEC_CMD_H
|
||||
#define __PERF_EXEC_CMD_H
|
||||
|
||||
extern void perf_set_argv_exec_path(const char *exec_path);
|
||||
extern const char *perf_extract_argv0_path(const char *path);
|
||||
@ -10,4 +10,4 @@ extern int execv_perf_cmd(const char **argv); /* NULL terminated */
|
||||
extern int execl_perf_cmd(const char *cmd, ...);
|
||||
extern const char *system_path(const char *path);
|
||||
|
||||
#endif /* PERF_EXEC_CMD_H */
|
||||
#endif /* __PERF_EXEC_CMD_H */
|
||||
|
@ -5,6 +5,8 @@
|
||||
|
||||
#include "util.h"
|
||||
#include "header.h"
|
||||
#include "../perf.h"
|
||||
#include "trace-event.h"
|
||||
|
||||
/*
|
||||
* Create new perf.data header attribute:
|
||||
@ -46,23 +48,17 @@ void perf_header_attr__add_id(struct perf_header_attr *self, u64 id)
|
||||
*/
|
||||
struct perf_header *perf_header__new(void)
|
||||
{
|
||||
struct perf_header *self = malloc(sizeof(*self));
|
||||
struct perf_header *self = calloc(sizeof(*self), 1);
|
||||
|
||||
if (!self)
|
||||
die("nomem");
|
||||
|
||||
self->frozen = 0;
|
||||
|
||||
self->attrs = 0;
|
||||
self->size = 1;
|
||||
self->attr = malloc(sizeof(void *));
|
||||
|
||||
if (!self->attr)
|
||||
die("nomem");
|
||||
|
||||
self->data_offset = 0;
|
||||
self->data_size = 0;
|
||||
|
||||
return self;
|
||||
}
|
||||
|
||||
@ -97,7 +93,7 @@ static struct perf_trace_event_type *events;
|
||||
void perf_header__push_event(u64 id, const char *name)
|
||||
{
|
||||
if (strlen(name) > MAX_EVENT_NAME)
|
||||
printf("Event %s will be truncated\n", name);
|
||||
pr_warning("Event %s will be truncated\n", name);
|
||||
|
||||
if (!events) {
|
||||
events = malloc(sizeof(struct perf_trace_event_type));
|
||||
@ -145,8 +141,14 @@ struct perf_file_header {
|
||||
struct perf_file_section attrs;
|
||||
struct perf_file_section data;
|
||||
struct perf_file_section event_types;
|
||||
DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
|
||||
};
|
||||
|
||||
void perf_header__feat_trace_info(struct perf_header *header)
|
||||
{
|
||||
set_bit(HEADER_TRACE_INFO, header->adds_features);
|
||||
}
|
||||
|
||||
static void do_write(int fd, void *buf, size_t size)
|
||||
{
|
||||
while (size) {
|
||||
@ -160,6 +162,32 @@ static void do_write(int fd, void *buf, size_t size)
|
||||
}
|
||||
}
|
||||
|
||||
static void perf_header__adds_write(struct perf_header *self, int fd)
|
||||
{
|
||||
struct perf_file_section trace_sec;
|
||||
u64 cur_offset = lseek(fd, 0, SEEK_CUR);
|
||||
unsigned long *feat_mask = self->adds_features;
|
||||
|
||||
if (test_bit(HEADER_TRACE_INFO, feat_mask)) {
|
||||
/* Write trace info */
|
||||
trace_sec.offset = lseek(fd, sizeof(trace_sec), SEEK_CUR);
|
||||
read_tracing_data(fd, attrs, nr_counters);
|
||||
trace_sec.size = lseek(fd, 0, SEEK_CUR) - trace_sec.offset;
|
||||
|
||||
/* Write trace info headers */
|
||||
lseek(fd, cur_offset, SEEK_SET);
|
||||
do_write(fd, &trace_sec, sizeof(trace_sec));
|
||||
|
||||
/*
|
||||
* Update cur_offset. So that other (future)
|
||||
* features can set their own infos in this place. But if we are
|
||||
* the only feature, at least that seeks to the place the data
|
||||
* should begin.
|
||||
*/
|
||||
cur_offset = lseek(fd, trace_sec.offset + trace_sec.size, SEEK_SET);
|
||||
}
|
||||
};
|
||||
|
||||
void perf_header__write(struct perf_header *self, int fd)
|
||||
{
|
||||
struct perf_file_header f_header;
|
||||
@ -198,6 +226,7 @@ void perf_header__write(struct perf_header *self, int fd)
|
||||
if (events)
|
||||
do_write(fd, events, self->event_size);
|
||||
|
||||
perf_header__adds_write(self, fd);
|
||||
|
||||
self->data_offset = lseek(fd, 0, SEEK_CUR);
|
||||
|
||||
@ -219,6 +248,8 @@ void perf_header__write(struct perf_header *self, int fd)
|
||||
},
|
||||
};
|
||||
|
||||
memcpy(&f_header.adds_features, &self->adds_features, sizeof(self->adds_features));
|
||||
|
||||
lseek(fd, 0, SEEK_SET);
|
||||
do_write(fd, &f_header, sizeof(f_header));
|
||||
lseek(fd, self->data_offset + self->data_size, SEEK_SET);
|
||||
@ -241,6 +272,20 @@ static void do_read(int fd, void *buf, size_t size)
|
||||
}
|
||||
}
|
||||
|
||||
static void perf_header__adds_read(struct perf_header *self, int fd)
|
||||
{
|
||||
const unsigned long *feat_mask = self->adds_features;
|
||||
|
||||
if (test_bit(HEADER_TRACE_INFO, feat_mask)) {
|
||||
struct perf_file_section trace_sec;
|
||||
|
||||
do_read(fd, &trace_sec, sizeof(trace_sec));
|
||||
lseek(fd, trace_sec.offset, SEEK_SET);
|
||||
trace_report(fd);
|
||||
lseek(fd, trace_sec.offset + trace_sec.size, SEEK_SET);
|
||||
}
|
||||
};
|
||||
|
||||
struct perf_header *perf_header__read(int fd)
|
||||
{
|
||||
struct perf_header *self = perf_header__new();
|
||||
@ -254,10 +299,16 @@ struct perf_header *perf_header__read(int fd)
|
||||
do_read(fd, &f_header, sizeof(f_header));
|
||||
|
||||
if (f_header.magic != PERF_MAGIC ||
|
||||
f_header.size != sizeof(f_header) ||
|
||||
f_header.attr_size != sizeof(f_attr))
|
||||
die("incompatible file format");
|
||||
|
||||
if (f_header.size != sizeof(f_header)) {
|
||||
/* Support the previous format */
|
||||
if (f_header.size == offsetof(typeof(f_header), adds_features))
|
||||
bitmap_zero(f_header.adds_features, HEADER_FEAT_BITS);
|
||||
else
|
||||
die("incompatible file format");
|
||||
}
|
||||
nr_attrs = f_header.attrs.size / sizeof(f_attr);
|
||||
lseek(fd, f_header.attrs.offset, SEEK_SET);
|
||||
|
||||
@ -290,6 +341,11 @@ struct perf_header *perf_header__read(int fd)
|
||||
do_read(fd, events, f_header.event_types.size);
|
||||
event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
|
||||
}
|
||||
|
||||
memcpy(&self->adds_features, &f_header.adds_features, sizeof(f_header.adds_features));
|
||||
|
||||
perf_header__adds_read(self, fd);
|
||||
|
||||
self->event_offset = f_header.event_types.offset;
|
||||
self->event_size = f_header.event_types.size;
|
||||
|
||||
|
@ -1,10 +1,12 @@
|
||||
#ifndef _PERF_HEADER_H
|
||||
#define _PERF_HEADER_H
|
||||
#ifndef __PERF_HEADER_H
|
||||
#define __PERF_HEADER_H
|
||||
|
||||
#include "../../../include/linux/perf_event.h"
|
||||
#include <sys/types.h>
|
||||
#include "types.h"
|
||||
|
||||
#include <linux/bitmap.h>
|
||||
|
||||
struct perf_header_attr {
|
||||
struct perf_event_attr attr;
|
||||
int ids, size;
|
||||
@ -12,15 +14,20 @@ struct perf_header_attr {
|
||||
off_t id_offset;
|
||||
};
|
||||
|
||||
#define HEADER_TRACE_INFO 1
|
||||
|
||||
#define HEADER_FEAT_BITS 256
|
||||
|
||||
struct perf_header {
|
||||
int frozen;
|
||||
int attrs, size;
|
||||
int frozen;
|
||||
int attrs, size;
|
||||
struct perf_header_attr **attr;
|
||||
s64 attr_offset;
|
||||
u64 data_offset;
|
||||
u64 data_size;
|
||||
u64 event_offset;
|
||||
u64 event_size;
|
||||
s64 attr_offset;
|
||||
u64 data_offset;
|
||||
u64 data_size;
|
||||
u64 event_offset;
|
||||
u64 event_size;
|
||||
DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
|
||||
};
|
||||
|
||||
struct perf_header *perf_header__read(int fd);
|
||||
@ -40,8 +47,8 @@ void perf_header_attr__add_id(struct perf_header_attr *self, u64 id);
|
||||
u64 perf_header__sample_type(struct perf_header *header);
|
||||
struct perf_event_attr *
|
||||
perf_header__find_attr(u64 id, struct perf_header *header);
|
||||
|
||||
void perf_header__feat_trace_info(struct perf_header *header);
|
||||
|
||||
struct perf_header *perf_header__new(void);
|
||||
|
||||
#endif /* _PERF_HEADER_H */
|
||||
#endif /* __PERF_HEADER_H */
|
||||
|
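A rough picture of the file layout that perf_header__adds_write()/perf_header__adds_read() earlier in this commit agree on may help; the layout below is schematic and only HEADER_TRACE_INFO exists so far:

/*
 * ... attr section ... event_types section ...
 * [ struct perf_file_section trace_sec  ]  <- written where adds_write() starts
 * [ tracing data from read_tracing_data() ] <- trace_sec.offset, trace_sec.size bytes
 * [ sample data ]                           <- header->data_offset begins here
 *
 * adds_read() mirrors this: read trace_sec, seek to trace_sec.offset,
 * call trace_report(fd), then seek past trace_sec.offset + trace_sec.size.
 */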
@ -1,5 +1,5 @@
|
||||
#ifndef HELP_H
|
||||
#define HELP_H
|
||||
#ifndef __PERF_HELP_H
|
||||
#define __PERF_HELP_H
|
||||
|
||||
struct cmdnames {
|
||||
size_t alloc;
|
||||
@ -26,4 +26,4 @@ int is_in_cmdlist(struct cmdnames *c, const char *s);
|
||||
void list_commands(const char *title, struct cmdnames *main_cmds,
|
||||
struct cmdnames *other_cmds);
|
||||
|
||||
#endif /* HELP_H */
|
||||
#endif /* __PERF_HELP_H */
|
||||
|
210
tools/perf/util/hist.c
Normal file
@ -0,0 +1,210 @@
|
||||
#include "hist.h"
|
||||
|
||||
struct rb_root hist;
|
||||
struct rb_root collapse_hists;
|
||||
struct rb_root output_hists;
|
||||
int callchain;
|
||||
|
||||
struct callchain_param callchain_param = {
|
||||
.mode = CHAIN_GRAPH_REL,
|
||||
.min_percent = 0.5
|
||||
};
|
||||
|
||||
unsigned long total;
|
||||
unsigned long total_mmap;
|
||||
unsigned long total_comm;
|
||||
unsigned long total_fork;
|
||||
unsigned long total_unknown;
|
||||
unsigned long total_lost;
|
||||
|
||||
/*
|
||||
* histogram, sorted on item, collects counts
|
||||
*/
|
||||
|
||||
struct hist_entry *__hist_entry__add(struct thread *thread, struct map *map,
|
||||
struct symbol *sym,
|
||||
struct symbol *sym_parent,
|
||||
u64 ip, u64 count, char level, bool *hit)
|
||||
{
|
||||
struct rb_node **p = &hist.rb_node;
|
||||
struct rb_node *parent = NULL;
|
||||
struct hist_entry *he;
|
||||
struct hist_entry entry = {
|
||||
.thread = thread,
|
||||
.map = map,
|
||||
.sym = sym,
|
||||
.ip = ip,
|
||||
.level = level,
|
||||
.count = count,
|
||||
.parent = sym_parent,
|
||||
};
|
||||
int cmp;
|
||||
|
||||
while (*p != NULL) {
|
||||
parent = *p;
|
||||
he = rb_entry(parent, struct hist_entry, rb_node);
|
||||
|
||||
cmp = hist_entry__cmp(&entry, he);
|
||||
|
||||
if (!cmp) {
|
||||
*hit = true;
|
||||
return he;
|
||||
}
|
||||
|
||||
if (cmp < 0)
|
||||
p = &(*p)->rb_left;
|
||||
else
|
||||
p = &(*p)->rb_right;
|
||||
}
|
||||
|
||||
he = malloc(sizeof(*he));
|
||||
if (!he)
|
||||
return NULL;
|
||||
*he = entry;
|
||||
rb_link_node(&he->rb_node, parent, p);
|
||||
rb_insert_color(&he->rb_node, &hist);
|
||||
*hit = false;
|
||||
return he;
|
||||
}
|
||||
|
||||
int64_t
|
||||
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
|
||||
{
|
||||
struct sort_entry *se;
|
||||
int64_t cmp = 0;
|
||||
|
||||
list_for_each_entry(se, &hist_entry__sort_list, list) {
|
||||
cmp = se->cmp(left, right);
|
||||
if (cmp)
|
||||
break;
|
||||
}
|
||||
|
||||
return cmp;
|
||||
}
|
||||
|
||||
int64_t
|
||||
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
|
||||
{
|
||||
struct sort_entry *se;
|
||||
int64_t cmp = 0;
|
||||
|
||||
list_for_each_entry(se, &hist_entry__sort_list, list) {
|
||||
int64_t (*f)(struct hist_entry *, struct hist_entry *);
|
||||
|
||||
f = se->collapse ?: se->cmp;
|
||||
|
||||
cmp = f(left, right);
|
||||
if (cmp)
|
||||
break;
|
||||
}
|
||||
|
||||
return cmp;
|
||||
}
|
||||
|
||||
void hist_entry__free(struct hist_entry *he)
|
||||
{
|
||||
free(he);
|
||||
}
|
||||
|
||||
/*
|
||||
* collapse the histogram
|
||||
*/
|
||||
|
||||
void collapse__insert_entry(struct hist_entry *he)
|
||||
{
|
||||
struct rb_node **p = &collapse_hists.rb_node;
|
||||
struct rb_node *parent = NULL;
|
||||
struct hist_entry *iter;
|
||||
int64_t cmp;
|
||||
|
||||
while (*p != NULL) {
|
||||
parent = *p;
|
||||
iter = rb_entry(parent, struct hist_entry, rb_node);
|
||||
|
||||
cmp = hist_entry__collapse(iter, he);
|
||||
|
||||
if (!cmp) {
|
||||
iter->count += he->count;
|
||||
hist_entry__free(he);
|
||||
return;
|
||||
}
|
||||
|
||||
if (cmp < 0)
|
||||
p = &(*p)->rb_left;
|
||||
else
|
||||
p = &(*p)->rb_right;
|
||||
}
|
||||
|
||||
rb_link_node(&he->rb_node, parent, p);
|
||||
rb_insert_color(&he->rb_node, &collapse_hists);
|
||||
}
|
||||
|
||||
void collapse__resort(void)
|
||||
{
|
||||
struct rb_node *next;
|
||||
struct hist_entry *n;
|
||||
|
||||
if (!sort__need_collapse)
|
||||
return;
|
||||
|
||||
next = rb_first(&hist);
|
||||
while (next) {
|
||||
n = rb_entry(next, struct hist_entry, rb_node);
|
||||
next = rb_next(&n->rb_node);
|
||||
|
||||
rb_erase(&n->rb_node, &hist);
|
||||
collapse__insert_entry(n);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* reverse the map, sort on count.
|
||||
*/
|
||||
|
||||
void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits)
|
||||
{
|
||||
struct rb_node **p = &output_hists.rb_node;
|
||||
struct rb_node *parent = NULL;
|
||||
struct hist_entry *iter;
|
||||
|
||||
if (callchain)
|
||||
callchain_param.sort(&he->sorted_chain, &he->callchain,
|
||||
min_callchain_hits, &callchain_param);
|
||||
|
||||
while (*p != NULL) {
|
||||
parent = *p;
|
||||
iter = rb_entry(parent, struct hist_entry, rb_node);
|
||||
|
||||
if (he->count > iter->count)
|
||||
p = &(*p)->rb_left;
|
||||
else
|
||||
p = &(*p)->rb_right;
|
||||
}
|
||||
|
||||
rb_link_node(&he->rb_node, parent, p);
|
||||
rb_insert_color(&he->rb_node, &output_hists);
|
||||
}
|
||||
|
||||
void output__resort(u64 total_samples)
|
||||
{
|
||||
struct rb_node *next;
|
||||
struct hist_entry *n;
|
||||
struct rb_root *tree = &hist;
|
||||
u64 min_callchain_hits;
|
||||
|
||||
min_callchain_hits =
|
||||
total_samples * (callchain_param.min_percent / 100);
|
||||
|
||||
if (sort__need_collapse)
|
||||
tree = &collapse_hists;
|
||||
|
||||
next = rb_first(tree);
|
||||
|
||||
while (next) {
|
||||
n = rb_entry(next, struct hist_entry, rb_node);
|
||||
next = rb_next(&n->rb_node);
|
||||
|
||||
rb_erase(&n->rb_node, tree);
|
||||
output__insert_entry(n, min_callchain_hits);
|
||||
}
|
||||
}
|
50
tools/perf/util/hist.h
Normal file
@ -0,0 +1,50 @@
|
||||
#ifndef __PERF_HIST_H
|
||||
#define __PERF_HIST_H
|
||||
#include "../builtin.h"
|
||||
|
||||
#include "util.h"
|
||||
|
||||
#include "color.h"
|
||||
#include <linux/list.h>
|
||||
#include "cache.h"
|
||||
#include <linux/rbtree.h>
|
||||
#include "symbol.h"
|
||||
#include "string.h"
|
||||
#include "callchain.h"
|
||||
#include "strlist.h"
|
||||
#include "values.h"
|
||||
|
||||
#include "../perf.h"
|
||||
#include "debug.h"
|
||||
#include "header.h"
|
||||
|
||||
#include "parse-options.h"
|
||||
#include "parse-events.h"
|
||||
|
||||
#include "thread.h"
|
||||
#include "sort.h"
|
||||
|
||||
extern struct rb_root hist;
|
||||
extern struct rb_root collapse_hists;
|
||||
extern struct rb_root output_hists;
|
||||
extern int callchain;
|
||||
extern struct callchain_param callchain_param;
|
||||
extern unsigned long total;
|
||||
extern unsigned long total_mmap;
|
||||
extern unsigned long total_comm;
|
||||
extern unsigned long total_fork;
|
||||
extern unsigned long total_unknown;
|
||||
extern unsigned long total_lost;
|
||||
|
||||
struct hist_entry *__hist_entry__add(struct thread *thread, struct map *map,
|
||||
struct symbol *sym, struct symbol *parent,
|
||||
u64 ip, u64 count, char level, bool *hit);
|
||||
extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *);
|
||||
extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *);
|
||||
extern void hist_entry__free(struct hist_entry *);
|
||||
extern void collapse__insert_entry(struct hist_entry *);
|
||||
extern void collapse__resort(void);
|
||||
extern void output__insert_entry(struct hist_entry *, u64);
|
||||
extern void output__resort(u64);
|
||||
|
||||
#endif /* __PERF_HIST_H */
|
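To show how the hist code factored out above is meant to be used by report/annotate-style commands, a small sketch follows (not from this commit; the wrapper names are made up):

/* Sketch only: accounting one sample and resorting for output. */
#include "util/hist.h"

static int account_sample(struct thread *thread, struct map *map,
                          struct symbol *sym, u64 ip, u64 count)
{
        bool hit;
        struct hist_entry *he;

        he = __hist_entry__add(thread, map, sym, NULL /* no parent */,
                               ip, count, '.', &hit);
        if (he == NULL)
                return -1;
        if (hit)
                he->count += count;     /* entry already existed, just bump it */
        return 0;
}

static void finish_hists(u64 total_samples)
{
        collapse__resort();             /* merge entries that compare equal */
        output__resort(total_samples);  /* order by count for display */
}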
1
tools/perf/util/include/asm/asm-offsets.h
Normal file
@ -0,0 +1 @@
|
||||
/* stub */
|
18
tools/perf/util/include/asm/bitops.h
Normal file
@ -0,0 +1,18 @@
|
||||
#ifndef _PERF_ASM_BITOPS_H_
|
||||
#define _PERF_ASM_BITOPS_H_
|
||||
|
||||
#include <sys/types.h>
|
||||
#include "../../types.h"
|
||||
#include <linux/compiler.h>
|
||||
|
||||
/* CHECKME: Not sure both always match */
|
||||
#define BITS_PER_LONG __WORDSIZE
|
||||
|
||||
#include "../../../../include/asm-generic/bitops/__fls.h"
|
||||
#include "../../../../include/asm-generic/bitops/fls.h"
|
||||
#include "../../../../include/asm-generic/bitops/fls64.h"
|
||||
#include "../../../../include/asm-generic/bitops/__ffs.h"
|
||||
#include "../../../../include/asm-generic/bitops/ffz.h"
|
||||
#include "../../../../include/asm-generic/bitops/hweight.h"
|
||||
|
||||
#endif
|
2
tools/perf/util/include/asm/byteorder.h
Normal file
@ -0,0 +1,2 @@
|
||||
#include <asm/types.h>
|
||||
#include "../../../../include/linux/swab.h"
|
1
tools/perf/util/include/asm/swab.h
Normal file
@ -0,0 +1 @@
|
||||
/* stub */
|
14
tools/perf/util/include/asm/uaccess.h
Normal file
@ -0,0 +1,14 @@
|
||||
#ifndef _PERF_ASM_UACCESS_H_
|
||||
#define _PERF_ASM_UACCESS_H_
|
||||
|
||||
#define __get_user(src, dest) \
|
||||
({ \
|
||||
(src) = *dest; \
|
||||
0; \
|
||||
})
|
||||
|
||||
#define get_user __get_user
|
||||
|
||||
#define access_ok(type, addr, size) 1
|
||||
|
||||
#endif
|
2
tools/perf/util/include/linux/bitmap.h
Normal file
@ -0,0 +1,2 @@
|
||||
#include "../../../../include/linux/bitmap.h"
|
||||
#include "../../../../include/asm-generic/bitops/find.h"
|
27
tools/perf/util/include/linux/bitops.h
Normal file
@ -0,0 +1,27 @@
|
||||
#ifndef _PERF_LINUX_BITOPS_H_
|
||||
#define _PERF_LINUX_BITOPS_H_
|
||||
|
||||
#define __KERNEL__
|
||||
|
||||
#define CONFIG_GENERIC_FIND_NEXT_BIT
|
||||
#define CONFIG_GENERIC_FIND_FIRST_BIT
|
||||
#include "../../../../include/linux/bitops.h"
|
||||
|
||||
static inline void set_bit(int nr, unsigned long *addr)
|
||||
{
|
||||
addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
|
||||
}
|
||||
|
||||
static __always_inline int test_bit(unsigned int nr, const unsigned long *addr)
|
||||
{
|
||||
return ((1UL << (nr % BITS_PER_LONG)) &
|
||||
(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
|
||||
}
|
||||
|
||||
unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, unsigned
|
||||
long size, unsigned long offset);
|
||||
|
||||
unsigned long generic_find_next_le_bit(const unsigned long *addr, unsigned
|
||||
long size, unsigned long offset);
|
||||
|
||||
#endif
|
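These minimal set_bit()/test_bit() helpers mainly exist so the feature bitmap in the perf header works in user space; a tiny sketch follows (the names here are made up, the real ones are HEADER_FEAT_BITS/HEADER_TRACE_INFO in header.h):

/* Sketch only: the user-space bitmap plumbing the header code relies on. */
#include <linux/types.h>        /* DECLARE_BITMAP, via the perf overrides */
#include <linux/bitops.h>       /* BITS_TO_LONGS, set_bit(), test_bit() */

#define FEAT_BITS       256
#define FEAT_TRACE_INFO 1

static DECLARE_BITMAP(features, FEAT_BITS);

static int has_trace_info(void)
{
        set_bit(FEAT_TRACE_INFO, features);
        return test_bit(FEAT_TRACE_INFO, features);
}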
10
tools/perf/util/include/linux/compiler.h
Normal file
@ -0,0 +1,10 @@
|
||||
#ifndef _PERF_LINUX_COMPILER_H_
|
||||
#define _PERF_LINUX_COMPILER_H_
|
||||
|
||||
#ifndef __always_inline
|
||||
#define __always_inline inline
|
||||
#endif
|
||||
#define __user
|
||||
#define __attribute_const__
|
||||
|
||||
#endif
|
1
tools/perf/util/include/linux/ctype.h
Normal file
@ -0,0 +1 @@
|
||||
#include "../../../../include/linux/ctype.h"
|
@ -1,6 +1,16 @@
|
||||
#ifndef PERF_LINUX_KERNEL_H_
|
||||
#define PERF_LINUX_KERNEL_H_
|
||||
|
||||
#include <stdarg.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <assert.h>
|
||||
|
||||
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
|
||||
|
||||
#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1)
|
||||
#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
|
||||
|
||||
#ifndef offsetof
|
||||
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
|
||||
#endif
|
||||
@ -26,4 +36,70 @@
|
||||
_max1 > _max2 ? _max1 : _max2; })
|
||||
#endif
|
||||
|
||||
#ifndef min
|
||||
#define min(x, y) ({ \
|
||||
typeof(x) _min1 = (x); \
|
||||
typeof(y) _min2 = (y); \
|
||||
(void) (&_min1 == &_min2); \
|
||||
_min1 < _min2 ? _min1 : _min2; })
|
||||
#endif
|
||||
|
||||
#ifndef BUG_ON
|
||||
#define BUG_ON(cond) assert(!(cond))
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Both need more care to handle endianness
|
||||
* (Don't use bitmap_copy_le() for now)
|
||||
*/
|
||||
#define cpu_to_le64(x) (x)
|
||||
#define cpu_to_le32(x) (x)
|
||||
|
||||
static inline int
|
||||
vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
|
||||
{
|
||||
int i;
|
||||
ssize_t ssize = size;
|
||||
|
||||
i = vsnprintf(buf, size, fmt, args);
|
||||
|
||||
return (i >= ssize) ? (ssize - 1) : i;
|
||||
}
|
||||
|
||||
static inline int scnprintf(char * buf, size_t size, const char * fmt, ...)
|
||||
{
|
||||
va_list args;
|
||||
ssize_t ssize = size;
|
||||
int i;
|
||||
|
||||
va_start(args, fmt);
|
||||
i = vsnprintf(buf, size, fmt, args);
|
||||
va_end(args);
|
||||
|
||||
return (i >= ssize) ? (ssize - 1) : i;
|
||||
}
|
||||
|
||||
static inline unsigned long
|
||||
simple_strtoul(const char *nptr, char **endptr, int base)
|
||||
{
|
||||
return strtoul(nptr, endptr, base);
|
||||
}
|
||||
|
||||
#ifndef pr_fmt
|
||||
#define pr_fmt(fmt) fmt
|
||||
#endif
|
||||
|
||||
#define pr_err(fmt, ...) \
|
||||
do { fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__); } while (0)
|
||||
#define pr_warning(fmt, ...) \
|
||||
do { fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__); } while (0)
|
||||
#define pr_info(fmt, ...) \
|
||||
do { fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__); } while (0)
|
||||
#define pr_debug(fmt, ...) \
|
||||
eprintf(1, pr_fmt(fmt), ##__VA_ARGS__)
|
||||
#define pr_debugN(n, fmt, ...) \
|
||||
eprintf(n, pr_fmt(fmt), ##__VA_ARGS__)
|
||||
#define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__)
|
||||
#define pr_debug3(fmt, ...) pr_debugN(3, pr_fmt(fmt), ##__VA_ARGS__)
|
||||
|
||||
#endif
|
||||
|
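The pr_*() wrappers above are what the call sites elsewhere in this commit (pr_warning(), pr_debug(), ...) resolve to; a quick illustration of how the levels line up with the new eprintf(level, ...) follows, with a hypothetical example function:

/* Sketch only: pr_debug*() route through the leveled eprintf(). */
#include <linux/kernel.h>       /* the tools/perf override above */
#include "debug.h"              /* eprintf() declaration */

static void report_progress(int nr_samples)
{
        pr_warning("event buffer nearly full\n");       /* always goes to stderr */
        pr_debug("processed %d samples\n", nr_samples); /* printed when verbose >= 1 */
        pr_debug2("raw dump follows\n");                /* printed when verbose >= 2 */
}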
1
tools/perf/util/include/linux/string.h
Normal file
@ -0,0 +1 @@
|
||||
#include <string.h>
|
9
tools/perf/util/include/linux/types.h
Normal file
@ -0,0 +1,9 @@
|
||||
#ifndef _PERF_LINUX_TYPES_H_
|
||||
#define _PERF_LINUX_TYPES_H_
|
||||
|
||||
#include <asm/types.h>
|
||||
|
||||
#define DECLARE_BITMAP(name,bits) \
|
||||
unsigned long name[BITS_TO_LONGS(bits)]
|
||||
|
||||
#endif
|
@ -1,8 +1,8 @@
|
||||
#ifndef LEVENSHTEIN_H
|
||||
#define LEVENSHTEIN_H
|
||||
#ifndef __PERF_LEVENSHTEIN_H
|
||||
#define __PERF_LEVENSHTEIN_H
|
||||
|
||||
int levenshtein(const char *string1, const char *string2,
|
||||
int swap_penalty, int substition_penalty,
|
||||
int insertion_penalty, int deletion_penalty);
|
||||
|
||||
#endif
|
||||
#endif /* __PERF_LEVENSHTEIN_H */
|
||||
|
@ -3,6 +3,7 @@
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
#include "debug.h"
|
||||
|
||||
static inline int is_anon_memory(const char *filename)
|
||||
{
|
||||
@ -19,7 +20,8 @@ static int strcommon(const char *pathname, char *cwd, int cwdlen)
|
||||
return n;
|
||||
}
|
||||
|
||||
struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen)
|
||||
struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen,
|
||||
unsigned int sym_priv_size, symbol_filter_t filter)
|
||||
{
|
||||
struct map *self = malloc(sizeof(*self));
|
||||
|
||||
@ -27,6 +29,7 @@ static int strcommon(const char *pathname, char *cwd, int cwdlen)
|
||||
const char *filename = event->filename;
|
||||
char newfilename[PATH_MAX];
|
||||
int anon;
|
||||
bool new_dso;
|
||||
|
||||
if (cwd) {
|
||||
int n = strcommon(filename, cwd, cwdlen);
|
||||
@ -49,14 +52,29 @@ static int strcommon(const char *pathname, char *cwd, int cwdlen)
|
||||
self->end = event->start + event->len;
|
||||
self->pgoff = event->pgoff;
|
||||
|
||||
self->dso = dsos__findnew(filename);
|
||||
self->dso = dsos__findnew(filename, sym_priv_size, &new_dso);
|
||||
if (self->dso == NULL)
|
||||
goto out_delete;
|
||||
|
||||
if (new_dso) {
|
||||
int nr = dso__load(self->dso, self, filter);
|
||||
|
||||
if (nr < 0)
|
||||
pr_warning("Failed to open %s, continuing "
|
||||
"without symbols\n",
|
||||
self->dso->long_name);
|
||||
else if (nr == 0)
|
||||
pr_warning("No symbols found in %s, maybe "
|
||||
"install a debug package?\n",
|
||||
self->dso->long_name);
|
||||
}
|
||||
|
||||
if (self->dso == vdso || anon)
|
||||
self->map_ip = vdso__map_ip;
|
||||
else
|
||||
self->map_ip = self->unmap_ip = identity__map_ip;
|
||||
else {
|
||||
self->map_ip = map__map_ip;
|
||||
self->unmap_ip = map__unmap_ip;
|
||||
}
|
||||
}
|
||||
return self;
|
||||
out_delete:
|
||||
|
@ -1,545 +0,0 @@
|
||||
#include "util.h"
|
||||
#include "../perf.h"
|
||||
#include "string.h"
|
||||
#include "module.h"
|
||||
|
||||
#include <libelf.h>
|
||||
#include <libgen.h>
|
||||
#include <gelf.h>
|
||||
#include <elf.h>
|
||||
#include <dirent.h>
|
||||
#include <sys/utsname.h>
|
||||
|
||||
static unsigned int crc32(const char *p, unsigned int len)
|
||||
{
|
||||
int i;
|
||||
unsigned int crc = 0;
|
||||
|
||||
while (len--) {
|
||||
crc ^= *p++;
|
||||
for (i = 0; i < 8; i++)
|
||||
crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
|
||||
}
|
||||
return crc;
|
||||
}
|
||||
|
||||
/* module section methods */
|
||||
|
||||
struct sec_dso *sec_dso__new_dso(const char *name)
|
||||
{
|
||||
struct sec_dso *self = malloc(sizeof(*self) + strlen(name) + 1);
|
||||
|
||||
if (self != NULL) {
|
||||
strcpy(self->name, name);
|
||||
self->secs = RB_ROOT;
|
||||
self->find_section = sec_dso__find_section;
|
||||
}
|
||||
|
||||
return self;
|
||||
}
|
||||
|
||||
static void sec_dso__delete_section(struct section *self)
|
||||
{
|
||||
free(((void *)self));
|
||||
}
|
||||
|
||||
void sec_dso__delete_sections(struct sec_dso *self)
|
||||
{
|
||||
struct section *pos;
|
||||
struct rb_node *next = rb_first(&self->secs);
|
||||
|
||||
while (next) {
|
||||
pos = rb_entry(next, struct section, rb_node);
|
||||
next = rb_next(&pos->rb_node);
|
||||
rb_erase(&pos->rb_node, &self->secs);
|
||||
sec_dso__delete_section(pos);
|
||||
}
|
||||
}
|
||||
|
||||
void sec_dso__delete_self(struct sec_dso *self)
|
||||
{
|
||||
sec_dso__delete_sections(self);
|
||||
free(self);
|
||||
}
|
||||
|
||||
static void sec_dso__insert_section(struct sec_dso *self, struct section *sec)
|
||||
{
|
||||
struct rb_node **p = &self->secs.rb_node;
|
||||
struct rb_node *parent = NULL;
|
||||
const u64 hash = sec->hash;
|
||||
struct section *s;
|
||||
|
||||
while (*p != NULL) {
|
||||
parent = *p;
|
||||
s = rb_entry(parent, struct section, rb_node);
|
||||
if (hash < s->hash)
|
||||
p = &(*p)->rb_left;
|
||||
else
|
||||
p = &(*p)->rb_right;
|
||||
}
|
||||
rb_link_node(&sec->rb_node, parent, p);
|
||||
rb_insert_color(&sec->rb_node, &self->secs);
|
||||
}
|
||||
|
||||
struct section *sec_dso__find_section(struct sec_dso *self, const char *name)
|
||||
{
|
||||
struct rb_node *n;
|
||||
u64 hash;
|
||||
int len;
|
||||
|
||||
if (self == NULL)
|
||||
return NULL;
|
||||
|
||||
len = strlen(name);
|
||||
hash = crc32(name, len);
|
||||
|
||||
n = self->secs.rb_node;
|
||||
|
||||
while (n) {
|
||||
struct section *s = rb_entry(n, struct section, rb_node);
|
||||
|
||||
if (hash < s->hash)
|
||||
n = n->rb_left;
|
||||
else if (hash > s->hash)
|
||||
n = n->rb_right;
|
||||
else {
|
||||
if (!strcmp(name, s->name))
|
||||
return s;
|
||||
else
|
||||
n = rb_next(&s->rb_node);
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static size_t sec_dso__fprintf_section(struct section *self, FILE *fp)
|
||||
{
|
||||
return fprintf(fp, "name:%s vma:%llx path:%s\n",
|
||||
self->name, self->vma, self->path);
|
||||
}
|
||||
|
||||
size_t sec_dso__fprintf(struct sec_dso *self, FILE *fp)
|
||||
{
|
||||
size_t ret = fprintf(fp, "dso: %s\n", self->name);
|
||||
|
||||
struct rb_node *nd;
|
||||
for (nd = rb_first(&self->secs); nd; nd = rb_next(nd)) {
|
||||
struct section *pos = rb_entry(nd, struct section, rb_node);
|
||||
ret += sec_dso__fprintf_section(pos, fp);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct section *section__new(const char *name, const char *path)
|
||||
{
|
||||
struct section *self = calloc(1, sizeof(*self));
|
||||
|
||||
if (!self)
|
||||
goto out_failure;
|
||||
|
||||
self->name = calloc(1, strlen(name) + 1);
|
||||
if (!self->name)
|
||||
goto out_failure;
|
||||
|
||||
self->path = calloc(1, strlen(path) + 1);
|
||||
if (!self->path)
|
||||
goto out_failure;
|
||||
|
||||
strcpy(self->name, name);
|
||||
strcpy(self->path, path);
|
||||
self->hash = crc32(self->name, strlen(name));
|
||||
|
||||
return self;
|
||||
|
||||
out_failure:
|
||||
if (self) {
|
||||
if (self->name)
|
||||
free(self->name);
|
||||
if (self->path)
|
||||
free(self->path);
|
||||
free(self);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* module methods */
|
||||
|
||||
struct mod_dso *mod_dso__new_dso(const char *name)
|
||||
{
|
||||
struct mod_dso *self = malloc(sizeof(*self) + strlen(name) + 1);
|
||||
|
||||
if (self != NULL) {
|
||||
strcpy(self->name, name);
|
||||
self->mods = RB_ROOT;
|
||||
self->find_module = mod_dso__find_module;
|
||||
}
|
||||
|
||||
return self;
|
||||
}
|
||||
|
||||
static void mod_dso__delete_module(struct module *self)
|
||||
{
|
||||
free(((void *)self));
|
||||
}
|
||||
|
||||
void mod_dso__delete_modules(struct mod_dso *self)
|
||||
{
|
||||
struct module *pos;
|
||||
struct rb_node *next = rb_first(&self->mods);
|
||||
|
||||
while (next) {
|
||||
pos = rb_entry(next, struct module, rb_node);
|
||||
next = rb_next(&pos->rb_node);
|
||||
rb_erase(&pos->rb_node, &self->mods);
|
||||
mod_dso__delete_module(pos);
|
||||
}
|
||||
}
|
||||
|
||||
void mod_dso__delete_self(struct mod_dso *self)
|
||||
{
|
||||
mod_dso__delete_modules(self);
|
||||
free(self);
|
||||
}
|
||||
|
||||
static void mod_dso__insert_module(struct mod_dso *self, struct module *mod)
|
||||
{
|
||||
struct rb_node **p = &self->mods.rb_node;
|
||||
struct rb_node *parent = NULL;
|
||||
const u64 hash = mod->hash;
|
||||
struct module *m;
|
||||
|
||||
while (*p != NULL) {
|
||||
parent = *p;
|
||||
m = rb_entry(parent, struct module, rb_node);
|
||||
if (hash < m->hash)
|
||||
p = &(*p)->rb_left;
|
||||
else
|
||||
p = &(*p)->rb_right;
|
||||
}
|
||||
rb_link_node(&mod->rb_node, parent, p);
|
||||
rb_insert_color(&mod->rb_node, &self->mods);
|
||||
}
|
||||
|
||||
struct module *mod_dso__find_module(struct mod_dso *self, const char *name)
|
||||
{
|
||||
struct rb_node *n;
|
||||
u64 hash;
|
||||
int len;
|
||||
|
||||
if (self == NULL)
|
||||
return NULL;
|
||||
|
||||
len = strlen(name);
|
||||
hash = crc32(name, len);
|
||||
|
||||
n = self->mods.rb_node;
|
||||
|
||||
while (n) {
|
||||
struct module *m = rb_entry(n, struct module, rb_node);
|
||||
|
||||
if (hash < m->hash)
|
||||
n = n->rb_left;
|
||||
else if (hash > m->hash)
|
||||
n = n->rb_right;
|
||||
else {
|
||||
if (!strcmp(name, m->name))
|
||||
return m;
|
||||
else
|
||||
n = rb_next(&m->rb_node);
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static size_t mod_dso__fprintf_module(struct module *self, FILE *fp)
|
||||
{
|
||||
return fprintf(fp, "name:%s path:%s\n", self->name, self->path);
|
||||
}
|
||||
|
||||
size_t mod_dso__fprintf(struct mod_dso *self, FILE *fp)
|
||||
{
|
||||
struct rb_node *nd;
|
||||
size_t ret;
|
||||
|
||||
ret = fprintf(fp, "dso: %s\n", self->name);
|
||||
|
||||
for (nd = rb_first(&self->mods); nd; nd = rb_next(nd)) {
|
||||
struct module *pos = rb_entry(nd, struct module, rb_node);
|
||||
|
||||
ret += mod_dso__fprintf_module(pos, fp);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct module *module__new(const char *name, const char *path)
|
||||
{
|
||||
struct module *self = calloc(1, sizeof(*self));
|
||||
|
||||
if (!self)
|
||||
goto out_failure;
|
||||
|
||||
self->name = calloc(1, strlen(name) + 1);
|
||||
if (!self->name)
|
||||
goto out_failure;
|
||||
|
||||
self->path = calloc(1, strlen(path) + 1);
|
||||
if (!self->path)
|
||||
goto out_failure;
|
||||
|
||||
strcpy(self->name, name);
|
||||
strcpy(self->path, path);
|
||||
self->hash = crc32(self->name, strlen(name));
|
||||
|
||||
return self;
|
||||
|
||||
out_failure:
|
||||
if (self) {
|
||||
if (self->name)
|
||||
free(self->name);
|
||||
if (self->path)
|
||||
free(self->path);
|
||||
free(self);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int mod_dso__load_sections(struct module *mod)
|
||||
{
|
||||
int count = 0, path_len;
|
||||
struct dirent *entry;
|
||||
char *line = NULL;
|
||||
char *dir_path;
|
||||
DIR *dir;
|
||||
size_t n;
|
||||
|
||||
path_len = strlen("/sys/module/");
|
||||
path_len += strlen(mod->name);
|
||||
path_len += strlen("/sections/");
|
||||
|
||||
dir_path = calloc(1, path_len + 1);
|
||||
if (dir_path == NULL)
|
||||
goto out_failure;
|
||||
|
||||
strcat(dir_path, "/sys/module/");
|
||||
strcat(dir_path, mod->name);
|
||||
strcat(dir_path, "/sections/");
|
||||
|
||||
dir = opendir(dir_path);
|
||||
if (dir == NULL)
|
||||
goto out_free;
|
||||
|
||||
while ((entry = readdir(dir))) {
|
||||
struct section *section;
|
||||
char *path, *vma;
|
||||
int line_len;
|
||||
FILE *file;
|
||||
|
||||
if (!strcmp(".", entry->d_name) || !strcmp("..", entry->d_name))
|
||||
continue;
|
||||
|
||||
path = calloc(1, path_len + strlen(entry->d_name) + 1);
|
||||
if (path == NULL)
|
||||
break;
|
||||
strcat(path, dir_path);
|
||||
strcat(path, entry->d_name);
|
||||
|
||||
file = fopen(path, "r");
|
||||
if (file == NULL) {
|
||||
free(path);
|
||||
break;
|
||||
}
|
||||
|
||||
line_len = getline(&line, &n, file);
|
||||
if (line_len < 0) {
|
||||
free(path);
|
||||
fclose(file);
|
||||
break;
|
||||
}
|
||||
|
||||
if (!line) {
|
||||
free(path);
|
||||
fclose(file);
|
||||
break;
|
||||
}
|
||||
|
||||
line[--line_len] = '\0'; /* \n */
|
||||
|
||||
vma = strstr(line, "0x");
|
||||
if (!vma) {
|
||||
free(path);
|
||||
fclose(file);
|
||||
break;
|
||||
}
|
||||
vma += 2;
|
||||
|
||||
section = section__new(entry->d_name, path);
|
||||
if (!section) {
|
||||
fprintf(stderr, "load_sections: allocation error\n");
|
||||
free(path);
|
||||
fclose(file);
|
||||
break;
|
||||
}
|
||||
|
||||
hex2u64(vma, §ion->vma);
|
||||
sec_dso__insert_section(mod->sections, section);
|
||||
|
||||
free(path);
|
||||
fclose(file);
|
||||
count++;
|
||||
}
|
||||
|
||||
closedir(dir);
|
||||
free(line);
|
||||
free(dir_path);
|
||||
|
||||
return count;
|
||||
|
||||
out_free:
|
||||
free(dir_path);
|
||||
|
||||
out_failure:
|
||||
return count;
|
||||
}
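
Each entry under /sys/module/<module>/sections/ is a one-line file holding that section's load address, which is what the strstr(line, "0x") and hex2u64() calls above pick apart; reading /sys/module/ext4/sections/.text, for instance, yields a single line such as 0xffffffffa01c7000 (module name and address here are illustrative, not taken from this patch).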
|
||||
|
||||
static int mod_dso__load_module_paths(struct mod_dso *self)
|
||||
{
|
||||
struct utsname uts;
|
||||
int count = 0, len, err = -1;
|
||||
char *line = NULL;
|
||||
FILE *file;
|
||||
char *dpath, *dir;
|
||||
size_t n;
|
||||
|
||||
if (uname(&uts) < 0)
|
||||
return err;
|
||||
|
||||
len = strlen("/lib/modules/");
|
||||
len += strlen(uts.release);
|
||||
len += strlen("/modules.dep");
|
||||
|
||||
dpath = calloc(1, len + 1);
|
||||
if (dpath == NULL)
|
||||
return err;
|
||||
|
||||
strcat(dpath, "/lib/modules/");
|
||||
strcat(dpath, uts.release);
|
||||
strcat(dpath, "/modules.dep");
|
||||
|
||||
file = fopen(dpath, "r");
|
||||
if (file == NULL)
|
||||
goto out_failure;
|
||||
|
||||
dir = dirname(dpath);
|
||||
if (!dir)
|
||||
goto out_failure;
|
||||
strcat(dir, "/");
|
||||
|
||||
while (!feof(file)) {
|
||||
struct module *module;
|
||||
char *name, *path, *tmp;
|
||||
FILE *modfile;
|
||||
int line_len;
|
||||
|
||||
line_len = getline(&line, &n, file);
|
||||
if (line_len < 0)
|
||||
break;
|
||||
|
||||
if (!line)
|
||||
break;
|
||||
|
||||
line[--line_len] = '\0'; /* \n */
|
||||
|
||||
path = strchr(line, ':');
|
||||
if (!path)
|
||||
break;
|
||||
*path = '\0';
|
||||
|
||||
path = strdup(line);
|
||||
if (!path)
|
||||
break;
|
||||
|
||||
if (!strstr(path, dir)) {
|
||||
if (strncmp(path, "kernel/", 7))
|
||||
break;
|
||||
|
||||
free(path);
|
||||
path = calloc(1, strlen(dir) + strlen(line) + 1);
|
||||
if (!path)
|
||||
break;
|
||||
strcat(path, dir);
|
||||
strcat(path, line);
|
||||
}
|
||||
|
||||
modfile = fopen(path, "r");
|
||||
if (modfile == NULL)
|
||||
break;
|
||||
fclose(modfile);
|
||||
|
||||
name = strdup(path);
|
||||
if (!name)
|
||||
break;
|
||||
|
||||
name = strtok(name, "/");
|
||||
tmp = name;
|
||||
|
||||
while (tmp) {
|
||||
tmp = strtok(NULL, "/");
|
||||
if (tmp)
|
||||
name = tmp;
|
||||
}
|
||||
|
||||
name = strsep(&name, ".");
|
||||
if (!name)
|
||||
break;
|
||||
|
||||
/* Quirk: replace '-' with '_' in all modules */
|
||||
for (len = strlen(name); len; len--) {
|
||||
if (*(name+len) == '-')
|
||||
*(name+len) = '_';
|
||||
}
|
||||
|
||||
module = module__new(name, path);
|
||||
if (!module)
|
||||
break;
|
||||
mod_dso__insert_module(self, module);
|
||||
|
||||
module->sections = sec_dso__new_dso("sections");
|
||||
if (!module->sections)
|
||||
break;
|
||||
|
||||
module->active = mod_dso__load_sections(module);
|
||||
|
||||
if (module->active > 0)
|
||||
count++;
|
||||
}
|
||||
|
||||
if (feof(file))
|
||||
err = count;
|
||||
else
|
||||
fprintf(stderr, "load_module_paths: modules.dep parsing failure!\n");
|
||||
|
||||
out_failure:
|
||||
if (dpath)
|
||||
free(dpath);
|
||||
if (file)
|
||||
fclose(file);
|
||||
if (line)
|
||||
free(line);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
int mod_dso__load_modules(struct mod_dso *dso)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = mod_dso__load_module_paths(dso);
|
||||
|
||||
return err;
|
||||
}
|
@ -1,53 +0,0 @@
|
||||
#ifndef _PERF_MODULE_
|
||||
#define _PERF_MODULE_ 1
|
||||
|
||||
#include <linux/types.h>
|
||||
#include "../types.h"
|
||||
#include <linux/list.h>
|
||||
#include <linux/rbtree.h>
|
||||
|
||||
struct section {
|
||||
struct rb_node rb_node;
|
||||
u64 hash;
|
||||
u64 vma;
|
||||
char *name;
|
||||
char *path;
|
||||
};
|
||||
|
||||
struct sec_dso {
|
||||
struct list_head node;
|
||||
struct rb_root secs;
|
||||
struct section *(*find_section)(struct sec_dso *, const char *name);
|
||||
char name[0];
|
||||
};
|
||||
|
||||
struct module {
|
||||
struct rb_node rb_node;
|
||||
u64 hash;
|
||||
char *name;
|
||||
char *path;
|
||||
struct sec_dso *sections;
|
||||
int active;
|
||||
};
|
||||
|
||||
struct mod_dso {
|
||||
struct list_head node;
|
||||
struct rb_root mods;
|
||||
struct module *(*find_module)(struct mod_dso *, const char *name);
|
||||
char name[0];
|
||||
};
|
||||
|
||||
struct sec_dso *sec_dso__new_dso(const char *name);
|
||||
void sec_dso__delete_sections(struct sec_dso *self);
|
||||
void sec_dso__delete_self(struct sec_dso *self);
|
||||
size_t sec_dso__fprintf(struct sec_dso *self, FILE *fp);
|
||||
struct section *sec_dso__find_section(struct sec_dso *self, const char *name);
|
||||
|
||||
struct mod_dso *mod_dso__new_dso(const char *name);
|
||||
void mod_dso__delete_modules(struct mod_dso *self);
|
||||
void mod_dso__delete_self(struct mod_dso *self);
|
||||
size_t mod_dso__fprintf(struct mod_dso *self, FILE *fp);
|
||||
struct module *mod_dso__find_module(struct mod_dso *self, const char *name);
|
||||
int mod_dso__load_modules(struct mod_dso *dso);
|
||||
|
||||
#endif /* _PERF_MODULE_ */
|
@ -8,9 +8,10 @@
|
||||
#include "cache.h"
|
||||
#include "header.h"
|
||||
|
||||
int nr_counters;
|
||||
|
||||
struct perf_event_attr attrs[MAX_COUNTERS];
|
||||
char *filters[MAX_COUNTERS];
|
||||
|
||||
struct event_symbol {
|
||||
u8 type;
|
||||
@ -708,7 +709,6 @@ static void store_event_type(const char *orgname)
|
||||
perf_header__push_event(id, orgname);
|
||||
}
|
||||
|
||||
|
||||
int parse_events(const struct option *opt __used, const char *str, int unset __used)
|
||||
{
|
||||
struct perf_event_attr attr;
|
||||
@ -745,6 +745,28 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u
|
||||
return 0;
|
||||
}
|
||||
|
||||
int parse_filter(const struct option *opt __used, const char *str,
|
||||
int unset __used)
|
||||
{
|
||||
int i = nr_counters - 1;
|
||||
int len = strlen(str);
|
||||
|
||||
if (i < 0 || attrs[i].type != PERF_TYPE_TRACEPOINT) {
|
||||
fprintf(stderr,
|
||||
"-F option should follow a -e tracepoint option\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
filters[i] = malloc(len + 1);
|
||||
if (!filters[i]) {
|
||||
fprintf(stderr, "not enough memory to hold filter string\n");
|
||||
return -1;
|
||||
}
|
||||
strcpy(filters[i], str);
|
||||
|
||||
return 0;
|
||||
}
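
The stored string is later applied as a tracepoint filter to the counter created by the preceding -e option. Going by the error message above, -F is the option that carries it; an invocation could look like this (tracepoint and filter expression are illustrative):

$ perf record -e irq:irq_handler_entry -F 'irq == 30' -a sleep 1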
|
||||
|
||||
static const char * const event_type_descriptors[] = {
|
||||
"",
|
||||
"Hardware event",
|
||||
|
@ -1,5 +1,5 @@
|
||||
#ifndef _PARSE_EVENTS_H
|
||||
#define _PARSE_EVENTS_H
|
||||
#ifndef __PERF_PARSE_EVENTS_H
|
||||
#define __PERF_PARSE_EVENTS_H
|
||||
/*
|
||||
* Parse symbolic events/counts passed in as options:
|
||||
*/
|
||||
@ -17,11 +17,13 @@ extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
|
||||
extern int nr_counters;
|
||||
|
||||
extern struct perf_event_attr attrs[MAX_COUNTERS];
|
||||
extern char *filters[MAX_COUNTERS];
|
||||
|
||||
extern const char *event_name(int ctr);
|
||||
extern const char *__event_name(int type, u64 config);
|
||||
|
||||
extern int parse_events(const struct option *opt, const char *str, int unset);
|
||||
extern int parse_filter(const struct option *opt, const char *str, int unset);
|
||||
|
||||
#define EVENTS_HELP_MAX (128*1024)
|
||||
|
||||
@ -31,4 +33,4 @@ extern char debugfs_path[];
|
||||
extern int valid_debugfs_mount(const char *debugfs);
|
||||
|
||||
|
||||
#endif /* _PARSE_EVENTS_H */
|
||||
#endif /* __PERF_PARSE_EVENTS_H */
|
||||
|
@ -1,5 +1,5 @@
|
||||
#ifndef PARSE_OPTIONS_H
|
||||
#define PARSE_OPTIONS_H
|
||||
#ifndef __PERF_PARSE_OPTIONS_H
|
||||
#define __PERF_PARSE_OPTIONS_H
|
||||
|
||||
enum parse_opt_type {
|
||||
/* special types */
|
||||
@ -174,4 +174,4 @@ extern int parse_opt_verbosity_cb(const struct option *, const char *, int);
|
||||
|
||||
extern const char *parse_options_fix_filename(const char *prefix, const char *file);
|
||||
|
||||
#endif
|
||||
#endif /* __PERF_PARSE_OPTIONS_H */
|
||||
|
@ -1,5 +1,5 @@
|
||||
#ifndef QUOTE_H
|
||||
#define QUOTE_H
|
||||
#ifndef __PERF_QUOTE_H
|
||||
#define __PERF_QUOTE_H
|
||||
|
||||
#include <stddef.h>
|
||||
#include <stdio.h>
|
||||
@ -65,4 +65,4 @@ extern void perl_quote_print(FILE *stream, const char *src);
|
||||
extern void python_quote_print(FILE *stream, const char *src);
|
||||
extern void tcl_quote_print(FILE *stream, const char *src);
|
||||
|
||||
#endif
|
||||
#endif /* __PERF_QUOTE_H */
|
||||
|
@ -1,5 +1,5 @@
|
||||
#ifndef RUN_COMMAND_H
|
||||
#define RUN_COMMAND_H
|
||||
#ifndef __PERF_RUN_COMMAND_H
|
||||
#define __PERF_RUN_COMMAND_H
|
||||
|
||||
enum {
|
||||
ERR_RUN_COMMAND_FORK = 10000,
|
||||
@ -85,4 +85,4 @@ struct async {
|
||||
int start_async(struct async *async);
|
||||
int finish_async(struct async *async);
|
||||
|
||||
#endif
|
||||
#endif /* __PERF_RUN_COMMAND_H */
|
||||
|
@ -1,5 +1,5 @@
|
||||
#ifndef SIGCHAIN_H
|
||||
#define SIGCHAIN_H
|
||||
#ifndef __PERF_SIGCHAIN_H
|
||||
#define __PERF_SIGCHAIN_H
|
||||
|
||||
typedef void (*sigchain_fun)(int);
|
||||
|
||||
@ -8,4 +8,4 @@ int sigchain_pop(int sig);
|
||||
|
||||
void sigchain_push_common(sigchain_fun f);
|
||||
|
||||
#endif /* SIGCHAIN_H */
|
||||
#endif /* __PERF_SIGCHAIN_H */
|
||||
|
tools/perf/util/sort.c (new file, 290 lines)
@ -0,0 +1,290 @@
|
||||
#include "sort.h"
|
||||
|
||||
regex_t parent_regex;
|
||||
char default_parent_pattern[] = "^sys_|^do_page_fault";
|
||||
char *parent_pattern = default_parent_pattern;
|
||||
char default_sort_order[] = "comm,dso,symbol";
|
||||
char *sort_order = default_sort_order;
|
||||
int sort__need_collapse = 0;
|
||||
int sort__has_parent = 0;
|
||||
|
||||
enum sort_type sort__first_dimension;
|
||||
|
||||
unsigned int dsos__col_width;
|
||||
unsigned int comms__col_width;
|
||||
unsigned int threads__col_width;
|
||||
static unsigned int parent_symbol__col_width;
|
||||
char * field_sep;
|
||||
|
||||
LIST_HEAD(hist_entry__sort_list);
|
||||
|
||||
struct sort_entry sort_thread = {
|
||||
.header = "Command: Pid",
|
||||
.cmp = sort__thread_cmp,
|
||||
.print = sort__thread_print,
|
||||
.width = &threads__col_width,
|
||||
};
|
||||
|
||||
struct sort_entry sort_comm = {
|
||||
.header = "Command",
|
||||
.cmp = sort__comm_cmp,
|
||||
.collapse = sort__comm_collapse,
|
||||
.print = sort__comm_print,
|
||||
.width = &comms__col_width,
|
||||
};
|
||||
|
||||
struct sort_entry sort_dso = {
|
||||
.header = "Shared Object",
|
||||
.cmp = sort__dso_cmp,
|
||||
.print = sort__dso_print,
|
||||
.width = &dsos__col_width,
|
||||
};
|
||||
|
||||
struct sort_entry sort_sym = {
|
||||
.header = "Symbol",
|
||||
.cmp = sort__sym_cmp,
|
||||
.print = sort__sym_print,
|
||||
};
|
||||
|
||||
struct sort_entry sort_parent = {
|
||||
.header = "Parent symbol",
|
||||
.cmp = sort__parent_cmp,
|
||||
.print = sort__parent_print,
|
||||
.width = &parent_symbol__col_width,
|
||||
};
|
||||
|
||||
struct sort_dimension {
|
||||
const char *name;
|
||||
struct sort_entry *entry;
|
||||
int taken;
|
||||
};
|
||||
|
||||
static struct sort_dimension sort_dimensions[] = {
|
||||
{ .name = "pid", .entry = &sort_thread, },
|
||||
{ .name = "comm", .entry = &sort_comm, },
|
||||
{ .name = "dso", .entry = &sort_dso, },
|
||||
{ .name = "symbol", .entry = &sort_sym, },
|
||||
{ .name = "parent", .entry = &sort_parent, },
|
||||
};
|
||||
|
||||
int64_t cmp_null(void *l, void *r)
|
||||
{
|
||||
if (!l && !r)
|
||||
return 0;
|
||||
else if (!l)
|
||||
return -1;
|
||||
else
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* --sort pid */
|
||||
|
||||
int64_t
|
||||
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
|
||||
{
|
||||
return right->thread->pid - left->thread->pid;
|
||||
}
|
||||
|
||||
int repsep_fprintf(FILE *fp, const char *fmt, ...)
|
||||
{
|
||||
int n;
|
||||
va_list ap;
|
||||
|
||||
va_start(ap, fmt);
|
||||
if (!field_sep)
|
||||
n = vfprintf(fp, fmt, ap);
|
||||
else {
|
||||
char *bf = NULL;
|
||||
n = vasprintf(&bf, fmt, ap);
|
||||
if (n > 0) {
|
||||
char *sep = bf;
|
||||
|
||||
while (1) {
|
||||
sep = strchr(sep, *field_sep);
|
||||
if (sep == NULL)
|
||||
break;
|
||||
*sep = '.';
|
||||
}
|
||||
}
|
||||
fputs(bf, fp);
|
||||
free(bf);
|
||||
}
|
||||
va_end(ap);
|
||||
return n;
|
||||
}
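
repsep_fprintf() exists so that a user-chosen field separator never shows up inside a field: when field_sep is set, every occurrence of that character in the formatted text is rewritten to '.'. A small illustration of the effect:

field_sep = ",";
repsep_fprintf(stdout, "%s", "foo,bar");	/* prints "foo.bar" */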
|
||||
|
||||
size_t
|
||||
sort__thread_print(FILE *fp, struct hist_entry *self, unsigned int width)
|
||||
{
|
||||
return repsep_fprintf(fp, "%*s:%5d", width - 6,
|
||||
self->thread->comm ?: "", self->thread->pid);
|
||||
}
|
||||
|
||||
size_t
|
||||
sort__comm_print(FILE *fp, struct hist_entry *self, unsigned int width)
|
||||
{
|
||||
return repsep_fprintf(fp, "%*s", width, self->thread->comm);
|
||||
}
|
||||
|
||||
/* --sort dso */
|
||||
|
||||
int64_t
|
||||
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
|
||||
{
|
||||
struct dso *dso_l = left->map ? left->map->dso : NULL;
|
||||
struct dso *dso_r = right->map ? right->map->dso : NULL;
|
||||
const char *dso_name_l, *dso_name_r;
|
||||
|
||||
if (!dso_l || !dso_r)
|
||||
return cmp_null(dso_l, dso_r);
|
||||
|
||||
if (verbose) {
|
||||
dso_name_l = dso_l->long_name;
|
||||
dso_name_r = dso_r->long_name;
|
||||
} else {
|
||||
dso_name_l = dso_l->short_name;
|
||||
dso_name_r = dso_r->short_name;
|
||||
}
|
||||
|
||||
return strcmp(dso_name_l, dso_name_r);
|
||||
}
|
||||
|
||||
size_t
|
||||
sort__dso_print(FILE *fp, struct hist_entry *self, unsigned int width)
|
||||
{
|
||||
if (self->map && self->map->dso) {
|
||||
const char *dso_name = !verbose ? self->map->dso->short_name :
|
||||
self->map->dso->long_name;
|
||||
return repsep_fprintf(fp, "%-*s", width, dso_name);
|
||||
}
|
||||
|
||||
return repsep_fprintf(fp, "%*llx", width, (u64)self->ip);
|
||||
}
|
||||
|
||||
/* --sort symbol */
|
||||
|
||||
int64_t
|
||||
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
|
||||
{
|
||||
u64 ip_l, ip_r;
|
||||
|
||||
if (left->sym == right->sym)
|
||||
return 0;
|
||||
|
||||
ip_l = left->sym ? left->sym->start : left->ip;
|
||||
ip_r = right->sym ? right->sym->start : right->ip;
|
||||
|
||||
return (int64_t)(ip_r - ip_l);
|
||||
}
|
||||
|
||||
|
||||
size_t
|
||||
sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used)
|
||||
{
|
||||
size_t ret = 0;
|
||||
|
||||
if (verbose) {
|
||||
char o = self->map ? dso__symtab_origin(self->map->dso) : '!';
|
||||
ret += repsep_fprintf(fp, "%#018llx %c ", (u64)self->ip, o);
|
||||
}
|
||||
|
||||
ret += repsep_fprintf(fp, "[%c] ", self->level);
|
||||
if (self->sym)
|
||||
ret += repsep_fprintf(fp, "%s", self->sym->name);
|
||||
else
|
||||
ret += repsep_fprintf(fp, "%#016llx", (u64)self->ip);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* --sort comm */
|
||||
|
||||
int64_t
|
||||
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
|
||||
{
|
||||
return right->thread->pid - left->thread->pid;
|
||||
}
|
||||
|
||||
int64_t
|
||||
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
|
||||
{
|
||||
char *comm_l = left->thread->comm;
|
||||
char *comm_r = right->thread->comm;
|
||||
|
||||
if (!comm_l || !comm_r)
|
||||
return cmp_null(comm_l, comm_r);
|
||||
|
||||
return strcmp(comm_l, comm_r);
|
||||
}
|
||||
|
||||
/* --sort parent */
|
||||
|
||||
int64_t
|
||||
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
|
||||
{
|
||||
struct symbol *sym_l = left->parent;
|
||||
struct symbol *sym_r = right->parent;
|
||||
|
||||
if (!sym_l || !sym_r)
|
||||
return cmp_null(sym_l, sym_r);
|
||||
|
||||
return strcmp(sym_l->name, sym_r->name);
|
||||
}
|
||||
|
||||
size_t
|
||||
sort__parent_print(FILE *fp, struct hist_entry *self, unsigned int width)
|
||||
{
|
||||
return repsep_fprintf(fp, "%-*s", width,
|
||||
self->parent ? self->parent->name : "[other]");
|
||||
}
|
||||
|
||||
int sort_dimension__add(const char *tok)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
|
||||
struct sort_dimension *sd = &sort_dimensions[i];
|
||||
|
||||
if (sd->taken)
|
||||
continue;
|
||||
|
||||
if (strncasecmp(tok, sd->name, strlen(tok)))
|
||||
continue;
|
||||
|
||||
if (sd->entry->collapse)
|
||||
sort__need_collapse = 1;
|
||||
|
||||
if (sd->entry == &sort_parent) {
|
||||
int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
|
||||
if (ret) {
|
||||
char err[BUFSIZ];
|
||||
|
||||
regerror(ret, &parent_regex, err, sizeof(err));
|
||||
fprintf(stderr, "Invalid regex: %s\n%s",
|
||||
parent_pattern, err);
|
||||
exit(-1);
|
||||
}
|
||||
sort__has_parent = 1;
|
||||
}
|
||||
|
||||
if (list_empty(&hist_entry__sort_list)) {
|
||||
if (!strcmp(sd->name, "pid"))
|
||||
sort__first_dimension = SORT_PID;
|
||||
else if (!strcmp(sd->name, "comm"))
|
||||
sort__first_dimension = SORT_COMM;
|
||||
else if (!strcmp(sd->name, "dso"))
|
||||
sort__first_dimension = SORT_DSO;
|
||||
else if (!strcmp(sd->name, "symbol"))
|
||||
sort__first_dimension = SORT_SYM;
|
||||
else if (!strcmp(sd->name, "parent"))
|
||||
sort__first_dimension = SORT_PARENT;
|
||||
}
|
||||
|
||||
list_add_tail(&sd->entry->list, &hist_entry__sort_list);
|
||||
sd->taken = 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
return -ESRCH;
|
||||
}
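
sort_dimension__add() is meant to be fed the comma-separated --sort string one token at a time; a rough sketch of how a caller such as perf report might consume sort_order (an illustration, not code from this patch):

static void setup_sorting(void)
{
	char *str = strdup(sort_order);
	char *tok;

	for (tok = strtok(str, ", "); tok; tok = strtok(NULL, ", ")) {
		if (sort_dimension__add(tok) < 0) {
			fprintf(stderr, "Unknown --sort key: '%s'\n", tok);
			exit(129);
		}
	}

	free(str);
}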
|
tools/perf/util/sort.h (new file, 99 lines)
@ -0,0 +1,99 @@
|
||||
#ifndef __PERF_SORT_H
|
||||
#define __PERF_SORT_H
|
||||
#include "../builtin.h"
|
||||
|
||||
#include "util.h"
|
||||
|
||||
#include "color.h"
|
||||
#include <linux/list.h>
|
||||
#include "cache.h"
|
||||
#include <linux/rbtree.h>
|
||||
#include "symbol.h"
|
||||
#include "string.h"
|
||||
#include "callchain.h"
|
||||
#include "strlist.h"
|
||||
#include "values.h"
|
||||
|
||||
#include "../perf.h"
|
||||
#include "debug.h"
|
||||
#include "header.h"
|
||||
|
||||
#include "parse-options.h"
|
||||
#include "parse-events.h"
|
||||
|
||||
#include "thread.h"
|
||||
#include "sort.h"
|
||||
|
||||
extern regex_t parent_regex;
|
||||
extern char *sort_order;
|
||||
extern char default_parent_pattern[];
|
||||
extern char *parent_pattern;
|
||||
extern char default_sort_order[];
|
||||
extern int sort__need_collapse;
|
||||
extern int sort__has_parent;
|
||||
extern char *field_sep;
|
||||
extern struct sort_entry sort_comm;
|
||||
extern struct sort_entry sort_dso;
|
||||
extern struct sort_entry sort_sym;
|
||||
extern struct sort_entry sort_parent;
|
||||
extern unsigned int dsos__col_width;
|
||||
extern unsigned int comms__col_width;
|
||||
extern unsigned int threads__col_width;
|
||||
extern enum sort_type sort__first_dimension;
|
||||
|
||||
struct hist_entry {
|
||||
struct rb_node rb_node;
|
||||
u64 count;
|
||||
struct thread *thread;
|
||||
struct map *map;
|
||||
struct symbol *sym;
|
||||
u64 ip;
|
||||
char level;
|
||||
struct symbol *parent;
|
||||
struct callchain_node callchain;
|
||||
struct rb_root sorted_chain;
|
||||
};
|
||||
|
||||
enum sort_type {
|
||||
SORT_PID,
|
||||
SORT_COMM,
|
||||
SORT_DSO,
|
||||
SORT_SYM,
|
||||
SORT_PARENT
|
||||
};
|
||||
|
||||
/*
|
||||
* configurable sorting bits
|
||||
*/
|
||||
|
||||
struct sort_entry {
|
||||
struct list_head list;
|
||||
|
||||
const char *header;
|
||||
|
||||
int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
|
||||
int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
|
||||
size_t (*print)(FILE *fp, struct hist_entry *, unsigned int width);
|
||||
unsigned int *width;
|
||||
bool elide;
|
||||
};
|
||||
|
||||
extern struct sort_entry sort_thread;
|
||||
extern struct list_head hist_entry__sort_list;
|
||||
|
||||
extern int repsep_fprintf(FILE *fp, const char *fmt, ...);
|
||||
extern size_t sort__thread_print(FILE *, struct hist_entry *, unsigned int);
|
||||
extern size_t sort__comm_print(FILE *, struct hist_entry *, unsigned int);
|
||||
extern size_t sort__dso_print(FILE *, struct hist_entry *, unsigned int);
|
||||
extern size_t sort__sym_print(FILE *, struct hist_entry *, unsigned int __used);
|
||||
extern int64_t cmp_null(void *, void *);
|
||||
extern int64_t sort__thread_cmp(struct hist_entry *, struct hist_entry *);
|
||||
extern int64_t sort__comm_cmp(struct hist_entry *, struct hist_entry *);
|
||||
extern int64_t sort__comm_collapse(struct hist_entry *, struct hist_entry *);
|
||||
extern int64_t sort__dso_cmp(struct hist_entry *, struct hist_entry *);
|
||||
extern int64_t sort__sym_cmp(struct hist_entry *, struct hist_entry *);
|
||||
extern int64_t sort__parent_cmp(struct hist_entry *, struct hist_entry *);
|
||||
extern size_t sort__parent_print(FILE *, struct hist_entry *, unsigned int);
|
||||
extern int sort_dimension__add(const char *);
|
||||
|
||||
#endif /* __PERF_SORT_H */
|
@ -1,5 +1,5 @@
#ifndef STRBUF_H
#define STRBUF_H
#ifndef __PERF_STRBUF_H
#define __PERF_STRBUF_H

/*
* Strbuf's can be used in many ways: as a byte array, or to store arbitrary
@ -134,4 +134,4 @@ extern int launch_editor(const char *path, struct strbuf *buffer, const char *co
extern int strbuf_branchname(struct strbuf *sb, const char *name);
extern int strbuf_check_branch_ref(struct strbuf *sb, const char *name);

#endif /* STRBUF_H */
#endif /* __PERF_STRBUF_H */
@ -1,3 +1,4 @@
#include <string.h>
#include "string.h"

static int hex(char ch)
@ -32,3 +33,13 @@ int hex2u64(const char *ptr, u64 *long_val)

return p - ptr;
}

char *strxfrchar(char *s, char from, char to)
{
char *p = s;

while ((p = strchr(p, from)) != NULL)
*p++ = to;

return s;
}
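
strxfrchar() gives callers a one-liner for the character-replacement quirks that were previously open-coded, for example the '-' versus '_' mismatch in module names handled earlier in this patch. A typical call, on an example string:

char name[] = "snd-hda-intel";

strxfrchar(name, '-', '_');	/* name is now "snd_hda_intel" */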
@ -1,11 +1,12 @@
|
||||
#ifndef _PERF_STRING_H_
|
||||
#define _PERF_STRING_H_
|
||||
#ifndef __PERF_STRING_H_
|
||||
#define __PERF_STRING_H_
|
||||
|
||||
#include "types.h"
|
||||
|
||||
int hex2u64(const char *ptr, u64 *val);
|
||||
char *strxfrchar(char *s, char from, char to);
|
||||
|
||||
#define _STR(x) #x
|
||||
#define STR(x) _STR(x)
|
||||
|
||||
#endif
|
||||
#endif /* __PERF_STRING_H */
|
||||
|
@ -1,5 +1,5 @@
#ifndef STRLIST_H_
#define STRLIST_H_
#ifndef __PERF_STRLIST_H
#define __PERF_STRLIST_H

#include <linux/rbtree.h>
#include <stdbool.h>
@ -36,4 +36,4 @@ static inline unsigned int strlist__nr_entries(const struct strlist *self)
}

int strlist__parse_list(struct strlist *self, const char *s);
#endif /* STRLIST_H_ */
#endif /* __PERF_STRLIST_H */
@ -103,7 +103,7 @@ void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end)
|
||||
fprintf(svgfile, " rect.process2 { fill:rgb(180,180,180); fill-opacity:0.9; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
|
||||
fprintf(svgfile, " rect.sample { fill:rgb( 0, 0,255); fill-opacity:0.8; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
|
||||
fprintf(svgfile, " rect.blocked { fill:rgb(255, 0, 0); fill-opacity:0.5; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
|
||||
fprintf(svgfile, " rect.waiting { fill:rgb(214,214, 0); fill-opacity:0.3; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
|
||||
fprintf(svgfile, " rect.waiting { fill:rgb(224,214, 0); fill-opacity:0.8; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
|
||||
fprintf(svgfile, " rect.WAITING { fill:rgb(255,214, 48); fill-opacity:0.6; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
|
||||
fprintf(svgfile, " rect.cpu { fill:rgb(192,192,192); fill-opacity:0.2; stroke-width:0.5; stroke:rgb(128,128,128); } \n");
|
||||
fprintf(svgfile, " rect.pstate { fill:rgb(128,128,128); fill-opacity:0.8; stroke-width:0; } \n");
|
||||
|
@ -1,5 +1,5 @@
#ifndef _INCLUDE_GUARD_SVG_HELPER_
#define _INCLUDE_GUARD_SVG_HELPER_
#ifndef __PERF_SVGHELPER_H
#define __PERF_SVGHELPER_H

#include "types.h"

@ -25,4 +25,4 @@ extern void svg_close(void);

extern int svg_page_width;

#endif
#endif /* __PERF_SVGHELPER_H */
File diff suppressed because it is too large
@ -1,11 +1,11 @@
|
||||
#ifndef _PERF_SYMBOL_
|
||||
#define _PERF_SYMBOL_ 1
|
||||
#ifndef __PERF_SYMBOL
|
||||
#define __PERF_SYMBOL 1
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <stdbool.h>
|
||||
#include "types.h"
|
||||
#include <linux/list.h>
|
||||
#include <linux/rbtree.h>
|
||||
#include "module.h"
|
||||
#include "event.h"
|
||||
|
||||
#ifdef HAVE_CPLUS_DEMANGLE
|
||||
@ -36,11 +36,6 @@ struct symbol {
|
||||
struct rb_node rb_node;
|
||||
u64 start;
|
||||
u64 end;
|
||||
u64 obj_start;
|
||||
u64 hist_sum;
|
||||
u64 *hist;
|
||||
struct module *module;
|
||||
void *priv;
|
||||
char name[0];
|
||||
};
|
||||
|
||||
@ -52,13 +47,11 @@ struct dso {
|
||||
unsigned char adjust_symbols;
|
||||
unsigned char slen_calculated;
|
||||
unsigned char origin;
|
||||
const char *short_name;
|
||||
char *long_name;
|
||||
char name[0];
|
||||
};
|
||||
|
||||
extern const char *sym_hist_filter;
|
||||
|
||||
typedef int (*symbol_filter_t)(struct dso *self, struct symbol *sym);
|
||||
|
||||
struct dso *dso__new(const char *name, unsigned int sym_priv_size);
|
||||
void dso__delete(struct dso *self);
|
||||
|
||||
@ -69,24 +62,23 @@ static inline void *dso__sym_priv(struct dso *self, struct symbol *sym)
|
||||
|
||||
struct symbol *dso__find_symbol(struct dso *self, u64 ip);
|
||||
|
||||
int dso__load_kernel(struct dso *self, const char *vmlinux,
|
||||
symbol_filter_t filter, int verbose, int modules);
|
||||
int dso__load_modules(struct dso *self, symbol_filter_t filter, int verbose);
|
||||
int dso__load(struct dso *self, symbol_filter_t filter, int verbose);
|
||||
struct dso *dsos__findnew(const char *name);
|
||||
int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size,
|
||||
symbol_filter_t filter, int modules);
|
||||
struct dso *dsos__findnew(const char *name, unsigned int sym_priv_size,
|
||||
bool *is_new);
|
||||
int dso__load(struct dso *self, struct map *map, symbol_filter_t filter);
|
||||
void dsos__fprintf(FILE *fp);
|
||||
|
||||
size_t dso__fprintf(struct dso *self, FILE *fp);
|
||||
char dso__symtab_origin(const struct dso *self);
|
||||
|
||||
int load_kernel(void);
|
||||
int load_kernel(unsigned int sym_priv_size, symbol_filter_t filter);
|
||||
|
||||
void symbol__init(void);
|
||||
|
||||
extern struct list_head dsos;
|
||||
extern struct dso *kernel_dso;
|
||||
extern struct map *kernel_map;
|
||||
extern struct dso *vdso;
|
||||
extern struct dso *hypervisor_dso;
|
||||
extern const char *vmlinux_name;
|
||||
extern int modules;
|
||||
#endif /* _PERF_SYMBOL_ */
|
||||
#endif /* __PERF_SYMBOL */
|
||||
|
@ -6,6 +6,9 @@
|
||||
#include "util.h"
|
||||
#include "debug.h"
|
||||
|
||||
static struct rb_root threads;
|
||||
static struct thread *last_match;
|
||||
|
||||
static struct thread *thread__new(pid_t pid)
|
||||
{
|
||||
struct thread *self = calloc(1, sizeof(*self));
|
||||
@ -15,7 +18,8 @@ static struct thread *thread__new(pid_t pid)
|
||||
self->comm = malloc(32);
|
||||
if (self->comm)
|
||||
snprintf(self->comm, 32, ":%d", self->pid);
|
||||
INIT_LIST_HEAD(&self->maps);
|
||||
self->maps = RB_ROOT;
|
||||
INIT_LIST_HEAD(&self->removed_maps);
|
||||
}
|
||||
|
||||
return self;
|
||||
@ -29,21 +33,40 @@ int thread__set_comm(struct thread *self, const char *comm)
|
||||
return self->comm ? 0 : -ENOMEM;
|
||||
}
|
||||
|
||||
int thread__comm_len(struct thread *self)
|
||||
{
|
||||
if (!self->comm_len) {
|
||||
if (!self->comm)
|
||||
return 0;
|
||||
self->comm_len = strlen(self->comm);
|
||||
}
|
||||
|
||||
return self->comm_len;
|
||||
}
|
||||
|
||||
static size_t thread__fprintf(struct thread *self, FILE *fp)
|
||||
{
|
||||
struct rb_node *nd;
|
||||
struct map *pos;
|
||||
size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
|
||||
size_t ret = fprintf(fp, "Thread %d %s\nCurrent maps:\n",
|
||||
self->pid, self->comm);
|
||||
|
||||
list_for_each_entry(pos, &self->maps, node)
|
||||
for (nd = rb_first(&self->maps); nd; nd = rb_next(nd)) {
|
||||
pos = rb_entry(nd, struct map, rb_node);
|
||||
ret += map__fprintf(pos, fp);
|
||||
}
|
||||
|
||||
ret = fprintf(fp, "Removed maps:\n");
|
||||
|
||||
list_for_each_entry(pos, &self->removed_maps, node)
|
||||
ret += map__fprintf(pos, fp);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct thread *
|
||||
threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match)
|
||||
struct thread *threads__findnew(pid_t pid)
|
||||
{
|
||||
struct rb_node **p = &threads->rb_node;
|
||||
struct rb_node **p = &threads.rb_node;
|
||||
struct rb_node *parent = NULL;
|
||||
struct thread *th;
|
||||
|
||||
@ -52,15 +75,15 @@ threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match)
|
||||
* so most of the time we dont have to look up
|
||||
* the full rbtree:
|
||||
*/
|
||||
if (*last_match && (*last_match)->pid == pid)
|
||||
return *last_match;
|
||||
if (last_match && last_match->pid == pid)
|
||||
return last_match;
|
||||
|
||||
while (*p != NULL) {
|
||||
parent = *p;
|
||||
th = rb_entry(parent, struct thread, rb_node);
|
||||
|
||||
if (th->pid == pid) {
|
||||
*last_match = th;
|
||||
last_match = th;
|
||||
return th;
|
||||
}
|
||||
|
||||
@ -73,17 +96,16 @@ threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match)
|
||||
th = thread__new(pid);
|
||||
if (th != NULL) {
|
||||
rb_link_node(&th->rb_node, parent, p);
|
||||
rb_insert_color(&th->rb_node, threads);
|
||||
*last_match = th;
|
||||
rb_insert_color(&th->rb_node, &threads);
|
||||
last_match = th;
|
||||
}
|
||||
|
||||
return th;
|
||||
}
|
||||
|
||||
struct thread *
|
||||
register_idle_thread(struct rb_root *threads, struct thread **last_match)
|
||||
struct thread *register_idle_thread(void)
|
||||
{
|
||||
struct thread *thread = threads__findnew(0, threads, last_match);
|
||||
struct thread *thread = threads__findnew(0);
|
||||
|
||||
if (!thread || thread__set_comm(thread, "swapper")) {
|
||||
fprintf(stderr, "problem inserting idle task.\n");
|
||||
@ -93,42 +115,82 @@ register_idle_thread(struct rb_root *threads, struct thread **last_match)
|
||||
return thread;
|
||||
}
|
||||
|
||||
void thread__insert_map(struct thread *self, struct map *map)
|
||||
static void thread__remove_overlappings(struct thread *self, struct map *map)
|
||||
{
|
||||
struct map *pos, *tmp;
|
||||
struct rb_node *next = rb_first(&self->maps);
|
||||
|
||||
list_for_each_entry_safe(pos, tmp, &self->maps, node) {
|
||||
if (map__overlap(pos, map)) {
|
||||
if (verbose >= 2) {
|
||||
printf("overlapping maps:\n");
|
||||
map__fprintf(map, stdout);
|
||||
map__fprintf(pos, stdout);
|
||||
}
|
||||
while (next) {
|
||||
struct map *pos = rb_entry(next, struct map, rb_node);
|
||||
next = rb_next(&pos->rb_node);
|
||||
|
||||
if (map->start <= pos->start && map->end > pos->start)
|
||||
pos->start = map->end;
|
||||
if (!map__overlap(pos, map))
|
||||
continue;
|
||||
|
||||
if (map->end >= pos->end && map->start < pos->end)
|
||||
pos->end = map->start;
|
||||
|
||||
if (verbose >= 2) {
|
||||
printf("after collision:\n");
|
||||
map__fprintf(pos, stdout);
|
||||
}
|
||||
|
||||
if (pos->start >= pos->end) {
|
||||
list_del_init(&pos->node);
|
||||
free(pos);
|
||||
}
|
||||
if (verbose >= 2) {
|
||||
fputs("overlapping maps:\n", stderr);
|
||||
map__fprintf(map, stderr);
|
||||
map__fprintf(pos, stderr);
|
||||
}
|
||||
|
||||
rb_erase(&pos->rb_node, &self->maps);
|
||||
/*
|
||||
* We may have references to this map, for instance in some
|
||||
* hist_entry instances, so just move them to a separate
|
||||
* list.
|
||||
*/
|
||||
list_add_tail(&pos->node, &self->removed_maps);
|
||||
}
|
||||
}
|
||||
|
||||
void maps__insert(struct rb_root *maps, struct map *map)
|
||||
{
|
||||
struct rb_node **p = &maps->rb_node;
|
||||
struct rb_node *parent = NULL;
|
||||
const u64 ip = map->start;
|
||||
struct map *m;
|
||||
|
||||
while (*p != NULL) {
|
||||
parent = *p;
|
||||
m = rb_entry(parent, struct map, rb_node);
|
||||
if (ip < m->start)
|
||||
p = &(*p)->rb_left;
|
||||
else
|
||||
p = &(*p)->rb_right;
|
||||
}
|
||||
|
||||
list_add_tail(&map->node, &self->maps);
|
||||
rb_link_node(&map->rb_node, parent, p);
|
||||
rb_insert_color(&map->rb_node, maps);
|
||||
}
|
||||
|
||||
struct map *maps__find(struct rb_root *maps, u64 ip)
|
||||
{
|
||||
struct rb_node **p = &maps->rb_node;
|
||||
struct rb_node *parent = NULL;
|
||||
struct map *m;
|
||||
|
||||
while (*p != NULL) {
|
||||
parent = *p;
|
||||
m = rb_entry(parent, struct map, rb_node);
|
||||
if (ip < m->start)
|
||||
p = &(*p)->rb_left;
|
||||
else if (ip > m->end)
|
||||
p = &(*p)->rb_right;
|
||||
else
|
||||
return m;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void thread__insert_map(struct thread *self, struct map *map)
|
||||
{
|
||||
thread__remove_overlappings(self, map);
|
||||
maps__insert(&self->maps, map);
|
||||
}
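
With maps kept in an rbtree per thread, resolving a sample becomes a range lookup followed by a symbol search in the map's DSO. A hedged sketch of the usual pattern, assuming the inline thread__find_map() helper added in thread.h below and the dso__find_symbol()/map_ip() conventions used elsewhere in perf (the function name here is hypothetical):

static struct symbol *resolve_sample(struct thread *thread, u64 ip)
{
	struct map *map = thread__find_map(thread, ip);

	if (map == NULL)
		return NULL;

	/* translate the address into the dso's space before the lookup */
	return dso__find_symbol(map->dso, map->map_ip(map, ip));
}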
|
||||
|
||||
int thread__fork(struct thread *self, struct thread *parent)
|
||||
{
|
||||
struct map *map;
|
||||
struct rb_node *nd;
|
||||
|
||||
if (self->comm)
|
||||
free(self->comm);
|
||||
@ -136,7 +198,8 @@ int thread__fork(struct thread *self, struct thread *parent)
|
||||
if (!self->comm)
|
||||
return -ENOMEM;
|
||||
|
||||
list_for_each_entry(map, &parent->maps, node) {
|
||||
for (nd = rb_first(&parent->maps); nd; nd = rb_next(nd)) {
|
||||
struct map *map = rb_entry(nd, struct map, rb_node);
|
||||
struct map *new = map__clone(map);
|
||||
if (!new)
|
||||
return -ENOMEM;
|
||||
@ -146,26 +209,12 @@ int thread__fork(struct thread *self, struct thread *parent)
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct map *thread__find_map(struct thread *self, u64 ip)
|
||||
{
|
||||
struct map *pos;
|
||||
|
||||
if (self == NULL)
|
||||
return NULL;
|
||||
|
||||
list_for_each_entry(pos, &self->maps, node)
|
||||
if (ip >= pos->start && ip <= pos->end)
|
||||
return pos;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
size_t threads__fprintf(FILE *fp, struct rb_root *threads)
|
||||
size_t threads__fprintf(FILE *fp)
|
||||
{
|
||||
size_t ret = 0;
|
||||
struct rb_node *nd;
|
||||
|
||||
for (nd = rb_first(threads); nd; nd = rb_next(nd)) {
|
||||
for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
|
||||
struct thread *pos = rb_entry(nd, struct thread, rb_node);
|
||||
|
||||
ret += thread__fprintf(pos, fp);
|
||||
|
@ -1,22 +1,37 @@
#ifndef __PERF_THREAD_H
#define __PERF_THREAD_H

#include <linux/rbtree.h>
#include <linux/list.h>
#include <unistd.h>
#include "symbol.h"

struct thread {
struct rb_node rb_node;
struct list_head maps;
struct rb_root maps;
struct list_head removed_maps;
pid_t pid;
char shortname[3];
char *comm;
int comm_len;
};

int thread__set_comm(struct thread *self, const char *comm);
struct thread *
threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match);
struct thread *
register_idle_thread(struct rb_root *threads, struct thread **last_match);
int thread__comm_len(struct thread *self);
struct thread *threads__findnew(pid_t pid);
struct thread *register_idle_thread(void);
void thread__insert_map(struct thread *self, struct map *map);
int thread__fork(struct thread *self, struct thread *parent);
struct map *thread__find_map(struct thread *self, u64 ip);
size_t threads__fprintf(FILE *fp, struct rb_root *threads);
size_t threads__fprintf(FILE *fp);

void maps__insert(struct rb_root *maps, struct map *map);
struct map *maps__find(struct rb_root *maps, u64 ip);

struct symbol *kernel_maps__find_symbol(const u64 ip, struct map **mapp);
struct map *kernel_maps__find_by_dso_name(const char *name);

static inline struct map *thread__find_map(struct thread *self, u64 ip)
{
return self ? maps__find(&self->maps, ip) : NULL;
}

#endif /* __PERF_THREAD_H */
@ -496,14 +496,12 @@ get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events)
|
||||
|
||||
return path.next;
|
||||
}
|
||||
void read_tracing_data(struct perf_event_attr *pattrs, int nb_events)
|
||||
void read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events)
|
||||
{
|
||||
char buf[BUFSIZ];
|
||||
struct tracepoint_path *tps;
|
||||
|
||||
output_fd = open(output_file, O_WRONLY | O_CREAT | O_TRUNC | O_LARGEFILE, 0644);
|
||||
if (output_fd < 0)
|
||||
die("creating file '%s'", output_file);
|
||||
output_fd = fd;
|
||||
|
||||
buf[0] = 23;
|
||||
buf[1] = 8;
|
||||
|
@ -40,6 +40,8 @@ int header_page_size_size;
|
||||
int header_page_data_offset;
|
||||
int header_page_data_size;
|
||||
|
||||
int latency_format;
|
||||
|
||||
static char *input_buf;
|
||||
static unsigned long long input_buf_ptr;
|
||||
static unsigned long long input_buf_siz;
|
||||
@ -284,18 +286,19 @@ void parse_ftrace_printk(char *file, unsigned int size __unused)
|
||||
char *line;
|
||||
char *next = NULL;
|
||||
char *addr_str;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
line = strtok_r(file, "\n", &next);
|
||||
while (line) {
|
||||
addr_str = strsep(&line, ":");
|
||||
if (!line) {
|
||||
warning("error parsing print strings");
|
||||
break;
|
||||
}
|
||||
item = malloc_or_die(sizeof(*item));
|
||||
ret = sscanf(line, "%as : %as",
|
||||
(float *)(void *)&addr_str, /* workaround gcc warning */
|
||||
(float *)(void *)&item->printk);
|
||||
item->addr = strtoull(addr_str, NULL, 16);
|
||||
free(addr_str);
|
||||
|
||||
/* fmt still has a space, skip it */
|
||||
item->printk = strdup(line+1);
|
||||
item->next = list;
|
||||
list = item;
|
||||
line = strtok_r(NULL, "\n", &next);
|
||||
@ -522,7 +525,10 @@ static enum event_type __read_token(char **tok)
|
||||
last_ch = ch;
|
||||
ch = __read_char();
|
||||
buf[i++] = ch;
|
||||
} while (ch != quote_ch && last_ch != '\\');
|
||||
/* the '\' '\' will cancel itself */
|
||||
if (ch == '\\' && last_ch == '\\')
|
||||
last_ch = 0;
|
||||
} while (ch != quote_ch || last_ch == '\\');
|
||||
/* remove the last quote */
|
||||
i--;
|
||||
goto out;
|
||||
@ -610,7 +616,7 @@ static enum event_type read_token_item(char **tok)
|
||||
static int test_type(enum event_type type, enum event_type expect)
|
||||
{
|
||||
if (type != expect) {
|
||||
die("Error: expected type %d but read %d",
|
||||
warning("Error: expected type %d but read %d",
|
||||
expect, type);
|
||||
return -1;
|
||||
}
|
||||
@ -621,13 +627,13 @@ static int test_type_token(enum event_type type, char *token,
|
||||
enum event_type expect, const char *expect_tok)
|
||||
{
|
||||
if (type != expect) {
|
||||
die("Error: expected type %d but read %d",
|
||||
warning("Error: expected type %d but read %d",
|
||||
expect, type);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (strcmp(token, expect_tok) != 0) {
|
||||
die("Error: expected '%s' but read '%s'",
|
||||
warning("Error: expected '%s' but read '%s'",
|
||||
expect_tok, token);
|
||||
return -1;
|
||||
}
|
||||
@ -665,7 +671,7 @@ static int __read_expected(enum event_type expect, const char *str, int newline_
|
||||
|
||||
free_token(token);
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int read_expected(enum event_type expect, const char *str)
|
||||
@ -682,10 +688,10 @@ static char *event_read_name(void)
|
||||
{
|
||||
char *token;
|
||||
|
||||
if (read_expected(EVENT_ITEM, (char *)"name") < 0)
|
||||
if (read_expected(EVENT_ITEM, "name") < 0)
|
||||
return NULL;
|
||||
|
||||
if (read_expected(EVENT_OP, (char *)":") < 0)
|
||||
if (read_expected(EVENT_OP, ":") < 0)
|
||||
return NULL;
|
||||
|
||||
if (read_expect_type(EVENT_ITEM, &token) < 0)
|
||||
@ -703,10 +709,10 @@ static int event_read_id(void)
|
||||
char *token;
|
||||
int id;
|
||||
|
||||
if (read_expected_item(EVENT_ITEM, (char *)"ID") < 0)
|
||||
if (read_expected_item(EVENT_ITEM, "ID") < 0)
|
||||
return -1;
|
||||
|
||||
if (read_expected(EVENT_OP, (char *)":") < 0)
|
||||
if (read_expected(EVENT_OP, ":") < 0)
|
||||
return -1;
|
||||
|
||||
if (read_expect_type(EVENT_ITEM, &token) < 0)
|
||||
@ -721,6 +727,24 @@ static int event_read_id(void)
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int field_is_string(struct format_field *field)
|
||||
{
|
||||
if ((field->flags & FIELD_IS_ARRAY) &&
|
||||
(!strstr(field->type, "char") || !strstr(field->type, "u8") ||
|
||||
!strstr(field->type, "s8")))
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int field_is_dynamic(struct format_field *field)
|
||||
{
|
||||
if (!strcmp(field->type, "__data_loc"))
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int event_read_fields(struct event *event, struct format_field **fields)
|
||||
{
|
||||
struct format_field *field = NULL;
|
||||
@ -738,7 +762,7 @@ static int event_read_fields(struct event *event, struct format_field **fields)
|
||||
|
||||
count++;
|
||||
|
||||
if (test_type_token(type, token, EVENT_ITEM, (char *)"field"))
|
||||
if (test_type_token(type, token, EVENT_ITEM, "field"))
|
||||
goto fail;
|
||||
free_token(token);
|
||||
|
||||
@ -753,7 +777,7 @@ static int event_read_fields(struct event *event, struct format_field **fields)
|
||||
type = read_token(&token);
|
||||
}
|
||||
|
||||
if (test_type_token(type, token, EVENT_OP, (char *)":") < 0)
|
||||
if (test_type_token(type, token, EVENT_OP, ":") < 0)
|
||||
return -1;
|
||||
|
||||
if (read_expect_type(EVENT_ITEM, &token) < 0)
|
||||
@ -865,14 +889,20 @@ static int event_read_fields(struct event *event, struct format_field **fields)
|
||||
free(brackets);
|
||||
}
|
||||
|
||||
if (test_type_token(type, token, EVENT_OP, (char *)";"))
|
||||
if (field_is_string(field)) {
|
||||
field->flags |= FIELD_IS_STRING;
|
||||
if (field_is_dynamic(field))
|
||||
field->flags |= FIELD_IS_DYNAMIC;
|
||||
}
|
||||
|
||||
if (test_type_token(type, token, EVENT_OP, ";"))
|
||||
goto fail;
|
||||
free_token(token);
|
||||
|
||||
if (read_expected(EVENT_ITEM, (char *)"offset") < 0)
|
||||
if (read_expected(EVENT_ITEM, "offset") < 0)
|
||||
goto fail_expect;
|
||||
|
||||
if (read_expected(EVENT_OP, (char *)":") < 0)
|
||||
if (read_expected(EVENT_OP, ":") < 0)
|
||||
goto fail_expect;
|
||||
|
||||
if (read_expect_type(EVENT_ITEM, &token))
|
||||
@ -880,13 +910,13 @@ static int event_read_fields(struct event *event, struct format_field **fields)
|
||||
field->offset = strtoul(token, NULL, 0);
|
||||
free_token(token);
|
||||
|
||||
if (read_expected(EVENT_OP, (char *)";") < 0)
|
||||
if (read_expected(EVENT_OP, ";") < 0)
|
||||
goto fail_expect;
|
||||
|
||||
if (read_expected(EVENT_ITEM, (char *)"size") < 0)
|
||||
if (read_expected(EVENT_ITEM, "size") < 0)
|
||||
goto fail_expect;
|
||||
|
||||
if (read_expected(EVENT_OP, (char *)":") < 0)
|
||||
if (read_expected(EVENT_OP, ":") < 0)
|
||||
goto fail_expect;
|
||||
|
||||
if (read_expect_type(EVENT_ITEM, &token))
|
||||
@ -894,11 +924,33 @@ static int event_read_fields(struct event *event, struct format_field **fields)
|
||||
field->size = strtoul(token, NULL, 0);
|
||||
free_token(token);
|
||||
|
||||
if (read_expected(EVENT_OP, (char *)";") < 0)
|
||||
if (read_expected(EVENT_OP, ";") < 0)
|
||||
goto fail_expect;
|
||||
|
||||
if (read_expect_type(EVENT_NEWLINE, &token) < 0)
|
||||
goto fail;
|
||||
type = read_token(&token);
|
||||
if (type != EVENT_NEWLINE) {
|
||||
/* newer versions of the kernel have a "signed" type */
|
||||
if (test_type_token(type, token, EVENT_ITEM, "signed"))
|
||||
goto fail;
|
||||
|
||||
free_token(token);
|
||||
|
||||
if (read_expected(EVENT_OP, ":") < 0)
|
||||
goto fail_expect;
|
||||
|
||||
if (read_expect_type(EVENT_ITEM, &token))
|
||||
goto fail;
|
||||
|
||||
/* add signed type */
|
||||
|
||||
free_token(token);
|
||||
if (read_expected(EVENT_OP, ";") < 0)
|
||||
goto fail_expect;
|
||||
|
||||
if (read_expect_type(EVENT_NEWLINE, &token))
|
||||
goto fail;
|
||||
}
|
||||
|
||||
free_token(token);
|
||||
|
||||
*fields = field;
|
||||
@ -921,10 +973,10 @@ static int event_read_format(struct event *event)
|
||||
char *token;
|
||||
int ret;
|
||||
|
||||
if (read_expected_item(EVENT_ITEM, (char *)"format") < 0)
|
||||
if (read_expected_item(EVENT_ITEM, "format") < 0)
|
||||
return -1;
|
||||
|
||||
if (read_expected(EVENT_OP, (char *)":") < 0)
|
||||
if (read_expected(EVENT_OP, ":") < 0)
|
||||
return -1;
|
||||
|
||||
if (read_expect_type(EVENT_NEWLINE, &token))
|
||||
@ -984,7 +1036,7 @@ process_cond(struct event *event, struct print_arg *top, char **tok)
|
||||
|
||||
*tok = NULL;
|
||||
type = process_arg(event, left, &token);
|
||||
if (test_type_token(type, token, EVENT_OP, (char *)":"))
|
||||
if (test_type_token(type, token, EVENT_OP, ":"))
|
||||
goto out_free;
|
||||
|
||||
arg->op.op = token;
|
||||
@ -1004,6 +1056,35 @@ out_free:
|
||||
return EVENT_ERROR;
|
||||
}
|
||||
|
||||
static enum event_type
|
||||
process_array(struct event *event, struct print_arg *top, char **tok)
|
||||
{
|
||||
struct print_arg *arg;
|
||||
enum event_type type;
|
||||
char *token = NULL;
|
||||
|
||||
arg = malloc_or_die(sizeof(*arg));
|
||||
memset(arg, 0, sizeof(*arg));
|
||||
|
||||
*tok = NULL;
|
||||
type = process_arg(event, arg, &token);
|
||||
if (test_type_token(type, token, EVENT_OP, "]"))
|
||||
goto out_free;
|
||||
|
||||
top->op.right = arg;
|
||||
|
||||
free_token(token);
|
||||
type = read_token_item(&token);
|
||||
*tok = token;
|
||||
|
||||
return type;
|
||||
|
||||
out_free:
|
||||
free_token(*tok);
|
||||
free_arg(arg);
|
||||
return EVENT_ERROR;
|
||||
}
|
||||
|
||||
static int get_op_prio(char *op)
|
||||
{
|
||||
if (!op[1]) {
|
||||
@ -1128,6 +1209,8 @@ process_op(struct event *event, struct print_arg *arg, char **tok)
|
||||
strcmp(token, "*") == 0 ||
|
||||
strcmp(token, "^") == 0 ||
|
||||
strcmp(token, "/") == 0 ||
|
||||
strcmp(token, "<") == 0 ||
|
||||
strcmp(token, ">") == 0 ||
|
||||
strcmp(token, "==") == 0 ||
|
||||
strcmp(token, "!=") == 0) {
|
||||
|
||||
@ -1144,17 +1227,46 @@ process_op(struct event *event, struct print_arg *arg, char **tok)
|
||||
|
||||
right = malloc_or_die(sizeof(*right));
|
||||
|
||||
type = process_arg(event, right, tok);
|
||||
type = read_token_item(&token);
|
||||
*tok = token;
|
||||
|
||||
/* could just be a type pointer */
|
||||
if ((strcmp(arg->op.op, "*") == 0) &&
|
||||
type == EVENT_DELIM && (strcmp(token, ")") == 0)) {
|
||||
if (left->type != PRINT_ATOM)
|
||||
die("bad pointer type");
|
||||
left->atom.atom = realloc(left->atom.atom,
|
||||
sizeof(left->atom.atom) + 3);
|
||||
strcat(left->atom.atom, " *");
|
||||
*arg = *left;
|
||||
free(arg);
|
||||
|
||||
return type;
|
||||
}
|
||||
|
||||
type = process_arg_token(event, right, tok, type);
|
||||
|
||||
arg->op.right = right;
|
||||
|
||||
} else if (strcmp(token, "[") == 0) {
|
||||
|
||||
left = malloc_or_die(sizeof(*left));
|
||||
*left = *arg;
|
||||
|
||||
arg->type = PRINT_OP;
|
||||
arg->op.op = token;
|
||||
arg->op.left = left;
|
||||
|
||||
arg->op.prio = 0;
|
||||
type = process_array(event, arg, tok);
|
||||
|
||||
} else {
|
||||
die("unknown op '%s'", token);
|
||||
warning("unknown op '%s'", token);
|
||||
event->flags |= EVENT_FL_FAILED;
|
||||
/* the arg is now the left side */
|
||||
return EVENT_NONE;
|
||||
}
|
||||
|
||||
|
||||
if (type == EVENT_OP) {
|
||||
int prio;
|
||||
|
||||
@ -1178,7 +1290,7 @@ process_entry(struct event *event __unused, struct print_arg *arg,
|
||||
char *field;
|
||||
char *token;
|
||||
|
||||
if (read_expected(EVENT_OP, (char *)"->") < 0)
|
||||
if (read_expected(EVENT_OP, "->") < 0)
|
||||
return EVENT_ERROR;
|
||||
|
||||
if (read_expect_type(EVENT_ITEM, &token) < 0)
|
||||
@ -1338,14 +1450,14 @@ process_fields(struct event *event, struct print_flag_sym **list, char **tok)
|
||||
do {
|
||||
free_token(token);
|
||||
type = read_token_item(&token);
|
||||
if (test_type_token(type, token, EVENT_OP, (char *)"{"))
|
||||
if (test_type_token(type, token, EVENT_OP, "{"))
|
||||
break;
|
||||
|
||||
arg = malloc_or_die(sizeof(*arg));
|
||||
|
||||
free_token(token);
|
||||
type = process_arg(event, arg, &token);
|
||||
if (test_type_token(type, token, EVENT_DELIM, (char *)","))
|
||||
if (test_type_token(type, token, EVENT_DELIM, ","))
|
||||
goto out_free;
|
||||
|
||||
field = malloc_or_die(sizeof(*field));
|
||||
@ -1356,7 +1468,7 @@ process_fields(struct event *event, struct print_flag_sym **list, char **tok)
|
||||
|
||||
free_token(token);
|
||||
type = process_arg(event, arg, &token);
|
||||
if (test_type_token(type, token, EVENT_OP, (char *)"}"))
|
||||
if (test_type_token(type, token, EVENT_OP, "}"))
|
||||
goto out_free;
|
||||
|
||||
value = arg_eval(arg);
|
||||
@ -1391,13 +1503,13 @@ process_flags(struct event *event, struct print_arg *arg, char **tok)
|
||||
memset(arg, 0, sizeof(*arg));
|
||||
arg->type = PRINT_FLAGS;
|
||||
|
||||
if (read_expected_item(EVENT_DELIM, (char *)"(") < 0)
|
||||
if (read_expected_item(EVENT_DELIM, "(") < 0)
|
||||
return EVENT_ERROR;
|
||||
|
||||
field = malloc_or_die(sizeof(*field));
|
||||
|
||||
type = process_arg(event, field, &token);
|
||||
if (test_type_token(type, token, EVENT_DELIM, (char *)","))
|
||||
if (test_type_token(type, token, EVENT_DELIM, ","))
|
||||
goto out_free;
|
||||
|
||||
arg->flags.field = field;
|
||||
@ -1408,11 +1520,11 @@ process_flags(struct event *event, struct print_arg *arg, char **tok)
|
||||
type = read_token_item(&token);
|
||||
}
|
||||
|
||||
if (test_type_token(type, token, EVENT_DELIM, (char *)","))
|
||||
if (test_type_token(type, token, EVENT_DELIM, ","))
|
||||
goto out_free;
|
||||
|
||||
type = process_fields(event, &arg->flags.flags, &token);
|
||||
if (test_type_token(type, token, EVENT_DELIM, (char *)")"))
|
||||
if (test_type_token(type, token, EVENT_DELIM, ")"))
|
||||
goto out_free;
|
||||
|
||||
free_token(token);
|
||||
@ -1434,19 +1546,19 @@ process_symbols(struct event *event, struct print_arg *arg, char **tok)
|
||||
memset(arg, 0, sizeof(*arg));
|
||||
arg->type = PRINT_SYMBOL;
|
||||
|
||||
if (read_expected_item(EVENT_DELIM, (char *)"(") < 0)
|
||||
if (read_expected_item(EVENT_DELIM, "(") < 0)
|
||||
return EVENT_ERROR;
|
||||
|
||||
field = malloc_or_die(sizeof(*field));
|
||||
|
||||
type = process_arg(event, field, &token);
|
||||
if (test_type_token(type, token, EVENT_DELIM, (char *)","))
|
||||
if (test_type_token(type, token, EVENT_DELIM, ","))
|
||||
goto out_free;
|
||||
|
||||
arg->symbol.field = field;
|
||||
|
||||
type = process_fields(event, &arg->symbol.symbols, &token);
|
||||
if (test_type_token(type, token, EVENT_DELIM, (char *)")"))
|
||||
if (test_type_token(type, token, EVENT_DELIM, ")"))
|
||||
goto out_free;
|
||||
|
||||
free_token(token);
|
||||
@ -1463,7 +1575,6 @@ process_paren(struct event *event, struct print_arg *arg, char **tok)
|
||||
{
|
||||
struct print_arg *item_arg;
|
||||
enum event_type type;
|
||||
int ptr_cast = 0;
|
||||
char *token;
|
||||
|
||||
type = process_arg(event, arg, &token);
|
||||
@ -1471,28 +1582,13 @@ process_paren(struct event *event, struct print_arg *arg, char **tok)
|
||||
if (type == EVENT_ERROR)
|
||||
return EVENT_ERROR;
|
||||
|
||||
if (type == EVENT_OP) {
|
||||
/* handle the ptr casts */
|
||||
if (!strcmp(token, "*")) {
|
||||
/*
|
||||
* FIXME: should we zapp whitespaces before ')' ?
|
||||
* (may require a peek_token_item())
|
||||
*/
|
||||
if (__peek_char() == ')') {
|
||||
ptr_cast = 1;
|
||||
free_token(token);
|
||||
type = read_token_item(&token);
|
||||
}
|
||||
}
|
||||
if (!ptr_cast) {
|
||||
type = process_op(event, arg, &token);
|
||||
if (type == EVENT_OP)
|
||||
type = process_op(event, arg, &token);
|
||||
|
||||
if (type == EVENT_ERROR)
|
||||
return EVENT_ERROR;
|
||||
}
|
||||
}
|
||||
if (type == EVENT_ERROR)
|
||||
return EVENT_ERROR;
|
||||
|
||||
if (test_type_token(type, token, EVENT_DELIM, (char *)")")) {
|
||||
if (test_type_token(type, token, EVENT_DELIM, ")")) {
|
||||
free_token(token);
|
||||
return EVENT_ERROR;
|
||||
}
|
||||
@ -1516,13 +1612,6 @@ process_paren(struct event *event, struct print_arg *arg, char **tok)
|
||||
item_arg = malloc_or_die(sizeof(*item_arg));
|
||||
|
||||
arg->type = PRINT_TYPE;
|
||||
if (ptr_cast) {
|
||||
char *old = arg->atom.atom;
|
||||
|
||||
arg->atom.atom = malloc_or_die(strlen(old + 3));
|
||||
sprintf(arg->atom.atom, "%s *", old);
|
||||
free(old);
|
||||
}
|
||||
arg->typecast.type = arg->atom.atom;
|
||||
arg->typecast.item = item_arg;
|
||||
type = process_arg_token(event, item_arg, &token, type);
|
||||
@ -1540,7 +1629,7 @@ process_str(struct event *event __unused, struct print_arg *arg, char **tok)
|
||||
enum event_type type;
|
||||
char *token;
|
||||
|
||||
if (read_expected(EVENT_DELIM, (char *)"(") < 0)
|
||||
if (read_expected(EVENT_DELIM, "(") < 0)
|
||||
return EVENT_ERROR;
|
||||
|
||||
if (read_expect_type(EVENT_ITEM, &token) < 0)
|
||||
@ -1550,7 +1639,7 @@ process_str(struct event *event __unused, struct print_arg *arg, char **tok)
	arg->string.string = token;
	arg->string.offset = -1;

	if (read_expected(EVENT_DELIM, (char *)")") < 0)
	if (read_expected(EVENT_DELIM, ")") < 0)
		return EVENT_ERROR;

	type = read_token(&token);
@ -1637,12 +1726,18 @@ process_arg_token(struct event *event, struct print_arg *arg,

static int event_read_print_args(struct event *event, struct print_arg **list)
{
	enum event_type type;
	enum event_type type = EVENT_ERROR;
	struct print_arg *arg;
	char *token;
	int args = 0;

	do {
		if (type == EVENT_NEWLINE) {
			free_token(token);
			type = read_token_item(&token);
			continue;
		}

		arg = malloc_or_die(sizeof(*arg));
		memset(arg, 0, sizeof(*arg));

@ -1683,18 +1778,19 @@ static int event_read_print(struct event *event)
	char *token;
	int ret;

	if (read_expected_item(EVENT_ITEM, (char *)"print") < 0)
	if (read_expected_item(EVENT_ITEM, "print") < 0)
		return -1;

	if (read_expected(EVENT_ITEM, (char *)"fmt") < 0)
	if (read_expected(EVENT_ITEM, "fmt") < 0)
		return -1;

	if (read_expected(EVENT_OP, (char *)":") < 0)
	if (read_expected(EVENT_OP, ":") < 0)
		return -1;

	if (read_expect_type(EVENT_DQUOTE, &token) < 0)
		goto fail;

concat:
	event->print_fmt.format = token;
	event->print_fmt.args = NULL;

@ -1704,7 +1800,22 @@ static int event_read_print(struct event *event)
	if (type == EVENT_NONE)
		return 0;

	if (test_type_token(type, token, EVENT_DELIM, (char *)","))
	/* Handle concatination of print lines */
	if (type == EVENT_DQUOTE) {
		char *cat;

		cat = malloc_or_die(strlen(event->print_fmt.format) +
				    strlen(token) + 1);
		strcpy(cat, event->print_fmt.format);
		strcat(cat, token);
		free_token(token);
		free_token(event->print_fmt.format);
		event->print_fmt.format = NULL;
		token = cat;
		goto concat;
	}

	if (test_type_token(type, token, EVENT_DELIM, ","))
		goto fail;

	free_token(token);
@ -1713,7 +1824,7 @@ static int event_read_print(struct event *event)
	if (ret < 0)
		return -1;

	return 0;
	return ret;

fail:
	free_token(token);
@ -1822,37 +1933,67 @@ static int get_common_info(const char *type, int *offset, int *size)
	return 0;
}

static int __parse_common(void *data, int *size, int *offset,
			  const char *name)
{
	int ret;

	if (!*size) {
		ret = get_common_info(name, offset, size);
		if (ret < 0)
			return ret;
	}
	return read_size(data + *offset, *size);
}

int trace_parse_common_type(void *data)
{
	static int type_offset;
	static int type_size;
	int ret;

	if (!type_size) {
		ret = get_common_info("common_type",
				      &type_offset,
				      &type_size);
		if (ret < 0)
			return ret;
	}
	return read_size(data + type_offset, type_size);
	return __parse_common(data, &type_size, &type_offset,
			      "common_type");
}

static int parse_common_pid(void *data)
{
	static int pid_offset;
	static int pid_size;

	return __parse_common(data, &pid_size, &pid_offset,
			      "common_pid");
}

static int parse_common_pc(void *data)
{
	static int pc_offset;
	static int pc_size;

	return __parse_common(data, &pc_size, &pc_offset,
			      "common_preempt_count");
}

static int parse_common_flags(void *data)
{
	static int flags_offset;
	static int flags_size;

	return __parse_common(data, &flags_size, &flags_offset,
			      "common_flags");
}

static int parse_common_lock_depth(void *data)
{
	static int ld_offset;
	static int ld_size;
	int ret;

	if (!pid_size) {
		ret = get_common_info("common_pid",
				      &pid_offset,
				      &pid_size);
		if (ret < 0)
			return ret;
	}
	ret = __parse_common(data, &ld_size, &ld_offset,
			     "common_lock_depth");
	if (ret < 0)
		return -1;

	return read_size(data + pid_offset, pid_size);
	return ret;
}

struct event *trace_find_event(int id)
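
The hunk above folds the repeated offset/size lookup into a single __parse_common() helper that each parse_common_*() wrapper calls with its own static cache. A minimal standalone sketch of that caching pattern follows; lookup_field(), read_int() and the sample format[] table are hypothetical stand-ins for illustration, not the perf implementation:

/*
 * Sketch only: resolve a "common_*" field's offset/size once, cache it in
 * static storage at the call site, and reuse it for every later record.
 */
#include <stdio.h>
#include <string.h>

struct field_desc { const char *name; int offset; int size; };

/* pretend format description of a record (hypothetical values) */
static const struct field_desc format[] = {
	{ "common_type", 0, 2 },
	{ "common_pid",  4, 4 },
};

static int lookup_field(const char *name, int *offset, int *size)
{
	for (size_t i = 0; i < sizeof(format) / sizeof(format[0]); i++) {
		if (!strcmp(format[i].name, name)) {
			*offset = format[i].offset;
			*size = format[i].size;
			return 0;
		}
	}
	return -1;
}

/* read a little-endian integer of 'size' bytes out of the record */
static int read_int(const void *data, int offset, int size)
{
	int val = 0;
	memcpy(&val, (const char *)data + offset, size);
	return val;
}

/* same shape as __parse_common(): resolve once, then read */
static int parse_common(const void *data, int *size, int *offset, const char *name)
{
	if (!*size && lookup_field(name, offset, size) < 0)
		return -1;
	return read_int(data, *offset, *size);
}

static int parse_common_pid(const void *data)
{
	static int pid_offset, pid_size;	/* cached after the first call */
	return parse_common(data, &pid_size, &pid_offset, "common_pid");
}

int main(void)
{
	unsigned char record[8] = { 1, 0, 0, 0, 42, 0, 0, 0 };
	printf("pid = %d\n", parse_common_pid(record));	/* prints 42 */
	return 0;
}
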
@ -1871,6 +2012,7 @@ static unsigned long long eval_num_arg(void *data, int size,
{
	unsigned long long val = 0;
	unsigned long long left, right;
	struct print_arg *larg;

	switch (arg->type) {
	case PRINT_NULL:
@ -1897,6 +2039,26 @@ static unsigned long long eval_num_arg(void *data, int size,
		return 0;
		break;
	case PRINT_OP:
		if (strcmp(arg->op.op, "[") == 0) {
			/*
			 * Arrays are special, since we don't want
			 * to read the arg as is.
			 */
			if (arg->op.left->type != PRINT_FIELD)
				goto default_op; /* oops, all bets off */
			larg = arg->op.left;
			if (!larg->field.field) {
				larg->field.field =
					find_any_field(event, larg->field.name);
				if (!larg->field.field)
					die("field %s not found", larg->field.name);
			}
			right = eval_num_arg(data, size, event, arg->op.right);
			val = read_size(data + larg->field.field->offset +
					right * long_size, long_size);
			break;
		}
default_op:
		left = eval_num_arg(data, size, event, arg->op.left);
		right = eval_num_arg(data, size, event, arg->op.right);
		switch (arg->op.op[0]) {
@ -1947,6 +2109,12 @@ static unsigned long long eval_num_arg(void *data, int size,
				die("unknown op '%s'", arg->op.op);
			val = left == right;
			break;
		case '-':
			val = left - right;
			break;
		case '+':
			val = left + right;
			break;
		default:
			die("unknown op '%s'", arg->op.op);
		}
@ -2145,8 +2313,9 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
		case 'u':
		case 'x':
		case 'i':
			bptr = (void *)(((unsigned long)bptr + (long_size - 1)) &
					~(long_size - 1));
			/* the pointers are always 4 bytes aligned */
			bptr = (void *)(((unsigned long)bptr + 3) &
					~3);
			switch (ls) {
			case 0:
			case 1:
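
The make_bprint_args() hunk above stops aligning the bprintk argument pointer to long_size and always rounds it up to a 4-byte boundary instead. A small, self-contained illustration of the round-up idiom used there (align_up() exists only for this example):

/* Illustration of the "(x + align - 1) & ~(align - 1)" round-up. */
#include <stdio.h>

static unsigned long align_up(unsigned long x, unsigned long align)
{
	/* align must be a power of two */
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	for (unsigned long x = 0; x < 9; x++)
		printf("%lu -> %lu\n", x, align_up(x, 4));	/* 0,4,4,4,4,8,8,8,8 */
	return 0;
}
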
@ -2270,7 +2439,27 @@ static void pretty_print(void *data, int size, struct event *event)

	for (; *ptr; ptr++) {
		ls = 0;
		if (*ptr == '%') {
		if (*ptr == '\\') {
			ptr++;
			switch (*ptr) {
			case 'n':
				printf("\n");
				break;
			case 't':
				printf("\t");
				break;
			case 'r':
				printf("\r");
				break;
			case '\\':
				printf("\\");
				break;
			default:
				printf("%c", *ptr);
				break;
			}

		} else if (*ptr == '%') {
			saveptr = ptr;
			show_func = 0;
cont_process:
@ -2377,6 +2566,41 @@ static inline int log10_cpu(int nb)
	return 1;
}

static void print_lat_fmt(void *data, int size __unused)
{
	unsigned int lat_flags;
	unsigned int pc;
	int lock_depth;
	int hardirq;
	int softirq;

	lat_flags = parse_common_flags(data);
	pc = parse_common_pc(data);
	lock_depth = parse_common_lock_depth(data);

	hardirq = lat_flags & TRACE_FLAG_HARDIRQ;
	softirq = lat_flags & TRACE_FLAG_SOFTIRQ;

	printf("%c%c%c",
	       (lat_flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
	       (lat_flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
	       'X' : '.',
	       (lat_flags & TRACE_FLAG_NEED_RESCHED) ?
	       'N' : '.',
	       (hardirq && softirq) ? 'H' :
	       hardirq ? 'h' : softirq ? 's' : '.');

	if (pc)
		printf("%x", pc);
	else
		printf(".");

	if (lock_depth < 0)
		printf(".");
	else
		printf("%d", lock_depth);
}

/* taken from Linux, written by Frederic Weisbecker */
static void print_graph_cpu(int cpu)
{
@ -2620,6 +2844,11 @@ pretty_print_func_ent(void *data, int size, struct event *event,

	printf(" | ");

	if (latency_format) {
		print_lat_fmt(data, size);
		printf(" | ");
	}

	field = find_field(event, "func");
	if (!field)
		die("function entry does not have func field");
@ -2663,6 +2892,11 @@ pretty_print_func_ret(void *data, int size __unused, struct event *event,

	printf(" | ");

	if (latency_format) {
		print_lat_fmt(data, size);
		printf(" | ");
	}

	field = find_field(event, "rettime");
	if (!field)
		die("can't find rettime in return graph");
@ -2724,7 +2958,7 @@ void print_event(int cpu, void *data, int size, unsigned long long nsecs,

	event = trace_find_event(type);
	if (!event) {
		printf("ug! no event found for type %d\n", type);
		warning("ug! no event found for type %d", type);
		return;
	}

@ -2734,9 +2968,20 @@ void print_event(int cpu, void *data, int size, unsigned long long nsecs,
		return pretty_print_func_graph(data, size, event, cpu,
					       pid, comm, secs, usecs);

	printf("%16s-%-5d [%03d] %5lu.%09Lu: %s: ",
	       comm, pid, cpu,
	       secs, nsecs, event->name);
	if (latency_format) {
		printf("%8.8s-%-5d %3d",
		       comm, pid, cpu);
		print_lat_fmt(data, size);
	} else
		printf("%16s-%-5d [%03d]", comm, pid, cpu);

	printf(" %5lu.%06lu: %s: ", secs, usecs, event->name);

	if (event->flags & EVENT_FL_FAILED) {
		printf("EVENT '%s' FAILED TO PARSE\n",
		       event->name);
		return;
	}

	pretty_print(data, size, event);
	printf("\n");
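
For reference, the print_event() hunk above now emits one of two header layouts depending on latency_format. The snippet below only demonstrates what those printf format strings produce for made-up comm/pid/cpu/time values and an event name; the flag field in the latency line is a literal placeholder, since the real characters come from print_lat_fmt():

/* Illustration of the two header layouts (hypothetical values). */
#include <stdio.h>

int main(void)
{
	const char *comm = "bash";
	int pid = 1234, cpu = 3;
	unsigned long secs = 105, usecs = 123456;

	/* default layout */
	printf("%16s-%-5d [%03d] %5lu.%06lu: sched_switch:\n",
	       comm, pid, cpu, secs, usecs);

	/* latency layout, flag field shown as a placeholder string */
	printf("%8.8s-%-5d %3d%s %5lu.%06lu: sched_switch:\n",
	       comm, pid, cpu, "d.h1.", secs, usecs);
	return 0;
}
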
@ -2807,46 +3052,71 @@ static void print_args(struct print_arg *args)
	}
}

static void parse_header_field(char *type,
static void parse_header_field(const char *field,
			       int *offset, int *size)
{
	char *token;
	int type;

	if (read_expected(EVENT_ITEM, (char *)"field") < 0)
	if (read_expected(EVENT_ITEM, "field") < 0)
		return;
	if (read_expected(EVENT_OP, (char *)":") < 0)
	if (read_expected(EVENT_OP, ":") < 0)
		return;

	/* type */
	if (read_expect_type(EVENT_ITEM, &token) < 0)
		return;
		goto fail;
	free_token(token);

	if (read_expected(EVENT_ITEM, type) < 0)
	if (read_expected(EVENT_ITEM, field) < 0)
		return;
	if (read_expected(EVENT_OP, (char *)";") < 0)
	if (read_expected(EVENT_OP, ";") < 0)
		return;
	if (read_expected(EVENT_ITEM, (char *)"offset") < 0)
	if (read_expected(EVENT_ITEM, "offset") < 0)
		return;
	if (read_expected(EVENT_OP, (char *)":") < 0)
	if (read_expected(EVENT_OP, ":") < 0)
		return;
	if (read_expect_type(EVENT_ITEM, &token) < 0)
		return;
		goto fail;
	*offset = atoi(token);
	free_token(token);
	if (read_expected(EVENT_OP, (char *)";") < 0)
	if (read_expected(EVENT_OP, ";") < 0)
		return;
	if (read_expected(EVENT_ITEM, (char *)"size") < 0)
	if (read_expected(EVENT_ITEM, "size") < 0)
		return;
	if (read_expected(EVENT_OP, (char *)":") < 0)
	if (read_expected(EVENT_OP, ":") < 0)
		return;
	if (read_expect_type(EVENT_ITEM, &token) < 0)
		return;
		goto fail;
	*size = atoi(token);
	free_token(token);
	if (read_expected(EVENT_OP, (char *)";") < 0)
		return;
	if (read_expect_type(EVENT_NEWLINE, &token) < 0)
	if (read_expected(EVENT_OP, ";") < 0)
		return;
	type = read_token(&token);
	if (type != EVENT_NEWLINE) {
		/* newer versions of the kernel have a "signed" type */
		if (type != EVENT_ITEM)
			goto fail;

		if (strcmp(token, "signed") != 0)
			goto fail;

		free_token(token);

		if (read_expected(EVENT_OP, ":") < 0)
			return;

		if (read_expect_type(EVENT_ITEM, &token))
			goto fail;

		free_token(token);
		if (read_expected(EVENT_OP, ";") < 0)
			return;

		if (read_expect_type(EVENT_NEWLINE, &token))
			goto fail;
	}
fail:
	free_token(token);
}
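
The parse_header_field() rework above tolerates the optional trailing "signed:" attribute that newer kernels append to each field line of the header page. Purely as an illustration of that line format (not the perf tokenizer), such a line can also be picked apart with sscanf; the sample line and field names are assumptions for the sketch:

/* Sketch: parse one "field: ... offset: ... size: ... signed: ..." line. */
#include <stdio.h>

int main(void)
{
	const char *line =
		"\tfield: u64 timestamp;\toffset:0;\tsize:8;\tsigned:0;";
	char type[32], name[32];
	int offset, size, sign = -1;	/* -1: "signed:" attribute absent */
	int n;

	n = sscanf(line, " field: %31s %31[^;]; offset:%d; size:%d; signed:%d;",
		   type, name, &offset, &size, &sign);
	printf("matched %d: %s %s offset=%d size=%d signed=%d\n",
	       n, type, name, offset, size, sign);
	return 0;
}
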
@ -2854,11 +3124,11 @@ int parse_header_page(char *buf, unsigned long size)
{
	init_input_buf(buf, size);

	parse_header_field((char *)"timestamp", &header_page_ts_offset,
	parse_header_field("timestamp", &header_page_ts_offset,
			   &header_page_ts_size);
	parse_header_field((char *)"commit", &header_page_size_offset,
	parse_header_field("commit", &header_page_size_offset,
			   &header_page_size_size);
	parse_header_field((char *)"data", &header_page_data_offset,
	parse_header_field("data", &header_page_data_offset,
			   &header_page_data_size);

	return 0;
@ -2909,6 +3179,9 @@ int parse_ftrace_file(char *buf, unsigned long size)
	if (ret < 0)
		die("failed to read ftrace event print fmt");

	/* New ftrace handles args */
	if (ret > 0)
		return 0;
	/*
	 * The arguments for ftrace files are parsed by the fields.
	 * Set up the fields as their arguments.
@ -2926,7 +3199,7 @@ int parse_ftrace_file(char *buf, unsigned long size)
	return 0;
}

int parse_event_file(char *buf, unsigned long size, char *system__unused __unused)
int parse_event_file(char *buf, unsigned long size, char *sys)
{
	struct event *event;
	int ret;
@ -2946,12 +3219,18 @@ int parse_event_file(char *buf, unsigned long size, char *system__unused __unuse
		die("failed to read event id");

	ret = event_read_format(event);
	if (ret < 0)
		die("failed to read event format");
	if (ret < 0) {
		warning("failed to read event format for %s", event->name);
		goto event_failed;
	}

	ret = event_read_print(event);
	if (ret < 0)
		die("failed to read event print fmt");
	if (ret < 0) {
		warning("failed to read event print fmt for %s", event->name);
		goto event_failed;
	}

	event->system = strdup(sys);

#define PRINT_ARGS 0
	if (PRINT_ARGS && event->print_fmt.args)
@ -2959,6 +3238,12 @@ int parse_event_file(char *buf, unsigned long size, char *system__unused __unuse

	add_event(event);
	return 0;

event_failed:
	event->flags |= EVENT_FL_FAILED;
	/* still add it even if it failed */
	add_event(event);
	return -1;
}

void parse_set_info(int nr_cpus, int long_sz)
@ -458,9 +458,8 @@ struct record *trace_read_data(int cpu)
	return data;
}

void trace_report(void)
void trace_report(int fd)
{
	const char *input_file = "trace.info";
	char buf[BUFSIZ];
	char test[] = { 23, 8, 68 };
	char *version;
@ -468,9 +467,7 @@ void trace_report(void)
	int show_funcs = 0;
	int show_printk = 0;

	input_fd = open(input_file, O_RDONLY);
	if (input_fd < 0)
		die("opening '%s'\n", input_file);
	input_fd = fd;

	read_or_die(buf, 3);
	if (memcmp(buf, test, 3) != 0)
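
With the change above, trace_report() no longer opens "trace.info" itself; the caller hands in an fd that is already positioned at the start of the tracing data. A hedged sketch of that calling convention, reusing the { 23, 8, 68 } magic check visible in the hunk (check_magic() is illustrative only, not a perf API):

/* Sketch: caller opens the file, the consumer only validates the magic. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

static int check_magic(int fd)
{
	static const char magic[] = { 23, 8, 68 };	/* 0x17 0x08 'D' */
	char buf[3];

	if (read(fd, buf, 3) != 3)
		return -1;
	return memcmp(buf, magic, 3) == 0 ? 0 : -1;
}

int main(int argc, char **argv)
{
	int fd = open(argc > 1 ? argv[1] : "trace.info", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	printf("magic %s\n", check_magic(fd) == 0 ? "ok" : "mismatch");
	close(fd);
	return 0;
}
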
@ -1,5 +1,5 @@
#ifndef _TRACE_EVENTS_H
#define _TRACE_EVENTS_H
#ifndef __PERF_TRACE_EVENTS_H
#define __PERF_TRACE_EVENTS_H

#include "parse-events.h"

@ -26,6 +26,9 @@ enum {
enum format_flags {
	FIELD_IS_ARRAY = 1,
	FIELD_IS_POINTER = 2,
	FIELD_IS_SIGNED = 4,
	FIELD_IS_STRING = 8,
	FIELD_IS_DYNAMIC = 16,
};

struct format_field {
@ -132,15 +135,18 @@ struct event {
	int flags;
	struct format format;
	struct print_fmt print_fmt;
	char *system;
};

enum {
	EVENT_FL_ISFTRACE = 1,
	EVENT_FL_ISPRINT = 2,
	EVENT_FL_ISBPRINT = 4,
	EVENT_FL_ISFUNC = 8,
	EVENT_FL_ISFUNCENT = 16,
	EVENT_FL_ISFUNCRET = 32,
	EVENT_FL_ISFTRACE = 0x01,
	EVENT_FL_ISPRINT = 0x02,
	EVENT_FL_ISBPRINT = 0x04,
	EVENT_FL_ISFUNC = 0x08,
	EVENT_FL_ISFUNCENT = 0x10,
	EVENT_FL_ISFUNCRET = 0x20,

	EVENT_FL_FAILED = 0x80000000
};

struct record {
@ -154,7 +160,7 @@ struct record *trace_read_data(int cpu);

void parse_set_info(int nr_cpus, int long_sz);

void trace_report(void);
void trace_report(int fd);

void *malloc_or_die(unsigned int size);

@ -166,7 +172,7 @@ void print_funcs(void);
void print_printk(void);

int parse_ftrace_file(char *buf, unsigned long size);
int parse_event_file(char *buf, unsigned long size, char *system);
int parse_event_file(char *buf, unsigned long size, char *sys);
void print_event(int cpu, void *data, int size, unsigned long long nsecs,
		 char *comm);

@ -233,6 +239,8 @@ extern int header_page_size_size;
extern int header_page_data_offset;
extern int header_page_data_size;

extern int latency_format;

int parse_header_page(char *buf, unsigned long size);
int trace_parse_common_type(void *data);
struct event *trace_find_event(int id);
@ -240,6 +248,15 @@ unsigned long long
raw_field_value(struct event *event, const char *name, void *data);
void *raw_field_ptr(struct event *event, const char *name, void *data);

void read_tracing_data(struct perf_event_attr *pattrs, int nb_events);
void read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events);

#endif /* _TRACE_EVENTS_H */
/* taken from kernel/trace/trace.h */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF = 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
	TRACE_FLAG_NEED_RESCHED = 0x04,
	TRACE_FLAG_HARDIRQ = 0x08,
	TRACE_FLAG_SOFTIRQ = 0x10,
};

#endif /* __PERF_TRACE_EVENTS_H */
@ -1,5 +1,5 @@
#ifndef _PERF_TYPES_H
#define _PERF_TYPES_H
#ifndef __PERF_TYPES_H
#define __PERF_TYPES_H

/*
 * We define u64 as unsigned long long for every architecture
@ -14,4 +14,4 @@ typedef signed short s16;
typedef unsigned char u8;
typedef signed char s8;

#endif /* _PERF_TYPES_H */
#endif /* __PERF_TYPES_H */
@ -1,5 +1,5 @@
#ifndef _PERF_VALUES_H
#define _PERF_VALUES_H
#ifndef __PERF_VALUES_H
#define __PERF_VALUES_H

#include "types.h"

@ -24,4 +24,4 @@ void perf_read_values_add_value(struct perf_read_values *values,
void perf_read_values_display(FILE *fp, struct perf_read_values *values,
			      int raw);

#endif /* _PERF_VALUES_H */
#endif /* __PERF_VALUES_H */