Mirror of https://github.com/torvalds/linux.git
Merge tag 'perf-core-for-mingo-5.0-20190121' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

BPF:

  Song Liu:

  - Introduce PERF_RECORD_KSYMBOL to allow tooling to notice the addition
    of new kernel symbols and be able to resolve samples in such symbols.

  - Introduce PERF_RECORD_BPF_EVENT to notify tooling about the loading
    and unloading of BPF programs, making them visible and allowing for
    the request of further information to allow for things like annotation.

  - Change the userspace perf tools to handle those new events and to
    synthesize them for pre-existing loaded BPF programs.

Kernel:

  Arnaldo Carvalho de Melo:

  - Make perf_event_output() propagate the output() return, allowing users
    to check for -ENOSPC in the ring buffer.

perf report:

  Thomas Richter:

  - Display arch specific diagnostic counter sets, starting with s390
    diagnostic counter sets.

perf session:

  Jiri Olsa:

  - Introduce a reader object to prep for multithreaded processing of
    recorded events.

Misc:

  Rasmus Villemoes:

  - Replace automatic const char[] variables by statics, to avoid
    initializing them at runtime, instead having them in .rodata,
    reducing code size.

  YueHaibing:

  - Remove duplicated workqueue.h include from perf_event.h.

  Brajeswar Ghosh:

  - Remove some more duplicated headers.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit f575494d4a
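The new record types are ordinary ring-buffer records. As a minimal consumer-side sketch (not part of this merge, written only for illustration; the field layout is taken from the PERF_RECORD_KSYMBOL description added to the uapi header in the diff below), a tool reading the mmap'ed ring buffer could decode such a record like this:

// Illustrative only: decodes one PERF_RECORD_KSYMBOL record as documented
// in the uapi comment below; error handling and sample_id parsing omitted.
#include <stdio.h>
#include <stdint.h>
#include <linux/perf_event.h>	/* struct perf_event_header */

struct ksymbol_record {
	struct perf_event_header header;
	uint64_t addr;		/* start address of the registered symbol */
	uint32_t len;		/* length of the symbol's code */
	uint16_t ksym_type;	/* enum perf_record_ksymbol_type */
	uint16_t flags;		/* bit 0: PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER */
	char name[];		/* NUL-terminated, padded to a u64 boundary */
};

static void print_ksymbol(const struct ksymbol_record *r)
{
	printf("%s ksymbol %s at %#llx, len %u, type %u\n",
	       (r->flags & 1) ? "unregister" : "register",
	       r->name, (unsigned long long)r->addr,
	       (unsigned)r->len, (unsigned)r->ksym_type);
}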
@@ -951,6 +951,7 @@ bpf_address_lookup(unsigned long addr, unsigned long *size,

void bpf_prog_kallsyms_add(struct bpf_prog *fp);
void bpf_prog_kallsyms_del(struct bpf_prog *fp);
void bpf_get_prog_name(const struct bpf_prog *prog, char *sym);

#else /* CONFIG_BPF_JIT */

@@ -1006,6 +1007,12 @@ static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)

static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
}

static inline void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
{
	sym[0] = '\0';
}

#endif /* CONFIG_BPF_JIT */

void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp);
@@ -53,7 +53,6 @@ struct perf_guest_info_callbacks {

#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <asm/local.h>

@@ -979,9 +978,9 @@ extern void perf_event_output_forward(struct perf_event *event,
extern void perf_event_output_backward(struct perf_event *event,
				       struct perf_sample_data *data,
				       struct pt_regs *regs);
extern void perf_event_output(struct perf_event *event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs);
extern int perf_event_output(struct perf_event *event,
			     struct perf_sample_data *data,
			     struct pt_regs *regs);

static inline bool
is_default_overflow_handler(struct perf_event *event)

@@ -1123,6 +1122,13 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
}

extern void perf_event_mmap(struct vm_area_struct *vma);

extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
			       bool unregister, const char *sym);
extern void perf_event_bpf_event(struct bpf_prog *prog,
				 enum perf_bpf_event_type type,
				 u16 flags);

extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

@@ -1343,6 +1349,13 @@ static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma) { }

typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data);
static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
				      bool unregister, const char *sym) { }
static inline void perf_event_bpf_event(struct bpf_prog *prog,
					enum perf_bpf_event_type type,
					u16 flags) { }
static inline void perf_event_exec(void) { }
static inline void perf_event_comm(struct task_struct *tsk, bool exec) { }
static inline void perf_event_namespaces(struct task_struct *tsk) { }
@@ -372,7 +372,9 @@ struct perf_event_attr {
			context_switch : 1, /* context switch data */
			write_backward : 1, /* Write ring buffer from end to beginning */
			namespaces     : 1, /* include namespaces data */
			__reserved_1   : 35;
			ksymbol        : 1, /* include ksymbol events */
			bpf_event      : 1, /* include bpf events */
			__reserved_1   : 33;

	union {
		__u32	wakeup_events;	/* wakeup every n events */

@@ -963,9 +965,58 @@ enum perf_event_type {
	 */
	PERF_RECORD_NAMESPACES = 16,

	/*
	 * Record ksymbol register/unregister events:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				addr;
	 *	u32				len;
	 *	u16				ksym_type;
	 *	u16				flags;
	 *	char				name[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_KSYMBOL = 17,

	/*
	 * Record bpf events:
	 *  enum perf_bpf_event_type {
	 *	PERF_BPF_EVENT_UNKNOWN		= 0,
	 *	PERF_BPF_EVENT_PROG_LOAD	= 1,
	 *	PERF_BPF_EVENT_PROG_UNLOAD	= 2,
	 *  };
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u16				type;
	 *	u16				flags;
	 *	u32				id;
	 *	u8				tag[BPF_TAG_SIZE];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_BPF_EVENT = 18,

	PERF_RECORD_MAX,	/* non-ABI */
};

enum perf_record_ksymbol_type {
	PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0,
	PERF_RECORD_KSYMBOL_TYPE_BPF = 1,
	PERF_RECORD_KSYMBOL_TYPE_MAX	/* non-ABI */
};

#define PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER (1 << 0)

enum perf_bpf_event_type {
	PERF_BPF_EVENT_UNKNOWN = 0,
	PERF_BPF_EVENT_PROG_LOAD = 1,
	PERF_BPF_EVENT_PROG_UNLOAD = 2,
	PERF_BPF_EVENT_MAX,	/* non-ABI */
};

#define PERF_MAX_STACK_DEPTH 127
#define PERF_MAX_CONTEXTS_PER_STACK 8
|
@@ -495,7 +495,7 @@ bpf_get_prog_addr_region(const struct bpf_prog *prog,
	*symbol_end = addr + hdr->pages * PAGE_SIZE;
}

static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
{
	const char *end = sym + KSYM_NAME_LEN;
	const struct btf_type *type;
@@ -1211,6 +1211,7 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
		/* bpf_prog_free_id() must be called first */
		bpf_prog_free_id(prog, do_idr_lock);
		bpf_prog_kallsyms_del_all(prog);

@@ -1554,6 +1555,7 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
	}

	bpf_prog_kallsyms_add(prog);
	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
	return err;

free_used_maps:
@@ -385,6 +385,8 @@ static atomic_t nr_namespaces_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static atomic_t nr_freq_events __read_mostly;
static atomic_t nr_switch_events __read_mostly;
static atomic_t nr_ksymbol_events __read_mostly;
static atomic_t nr_bpf_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);

@@ -4235,7 +4237,7 @@ static bool is_sb_event(struct perf_event *event)

	if (attr->mmap || attr->mmap_data || attr->mmap2 ||
	    attr->comm || attr->comm_exec ||
	    attr->task ||
	    attr->task || attr->ksymbol ||
	    attr->context_switch)
		return true;
	return false;

@@ -4305,6 +4307,10 @@ static void unaccount_event(struct perf_event *event)
		dec = true;
	if (has_branch_stack(event))
		dec = true;
	if (event->attr.ksymbol)
		atomic_dec(&nr_ksymbol_events);
	if (event->attr.bpf_event)
		atomic_dec(&nr_bpf_events);

	if (dec) {
		if (!atomic_add_unless(&perf_sched_count, -1, 1))

@@ -6489,7 +6495,7 @@ void perf_prepare_sample(struct perf_event_header *header,
		data->phys_addr = perf_virt_to_phys(data->addr);
}

static __always_inline void
static __always_inline int
__perf_event_output(struct perf_event *event,
		    struct perf_sample_data *data,
		    struct pt_regs *regs,

@@ -6499,13 +6505,15 @@ __perf_event_output(struct perf_event *event,
{
	struct perf_output_handle handle;
	struct perf_event_header header;
	int err;

	/* protect the callchain buffers */
	rcu_read_lock();

	perf_prepare_sample(&header, data, event, regs);

	if (output_begin(&handle, event, header.size))
	err = output_begin(&handle, event, header.size);
	if (err)
		goto exit;

	perf_output_sample(&handle, &header, data, event);

@@ -6514,6 +6522,7 @@ __perf_event_output(struct perf_event *event,

exit:
	rcu_read_unlock();
	return err;
}

void

@@ -6532,12 +6541,12 @@ perf_event_output_backward(struct perf_event *event,
	__perf_event_output(event, data, regs, perf_output_begin_backward);
}

void
int
perf_event_output(struct perf_event *event,
		  struct perf_sample_data *data,
		  struct pt_regs *regs)
{
	__perf_event_output(event, data, regs, perf_output_begin);
	return __perf_event_output(event, data, regs, perf_output_begin);
}

/*
@@ -7650,6 +7659,207 @@ static void perf_log_throttle(struct perf_event *event, int enable)
	perf_output_end(&handle);
}

/*
 * ksymbol register/unregister tracking
 */

struct perf_ksymbol_event {
	const char	*name;
	int		name_len;
	struct {
		struct perf_event_header	header;
		u64				addr;
		u32				len;
		u16				ksym_type;
		u16				flags;
	} event_id;
};

static int perf_event_ksymbol_match(struct perf_event *event)
{
	return event->attr.ksymbol;
}

static void perf_event_ksymbol_output(struct perf_event *event, void *data)
{
	struct perf_ksymbol_event *ksymbol_event = data;
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int ret;

	if (!perf_event_ksymbol_match(event))
		return;

	perf_event_header__init_id(&ksymbol_event->event_id.header,
				   &sample, event);
	ret = perf_output_begin(&handle, event,
				ksymbol_event->event_id.header.size);
	if (ret)
		return;

	perf_output_put(&handle, ksymbol_event->event_id);
	__output_copy(&handle, ksymbol_event->name, ksymbol_event->name_len);
	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
}

void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, bool unregister,
			const char *sym)
{
	struct perf_ksymbol_event ksymbol_event;
	char name[KSYM_NAME_LEN];
	u16 flags = 0;
	int name_len;

	if (!atomic_read(&nr_ksymbol_events))
		return;

	if (ksym_type >= PERF_RECORD_KSYMBOL_TYPE_MAX ||
	    ksym_type == PERF_RECORD_KSYMBOL_TYPE_UNKNOWN)
		goto err;

	strlcpy(name, sym, KSYM_NAME_LEN);
	name_len = strlen(name) + 1;
	while (!IS_ALIGNED(name_len, sizeof(u64)))
		name[name_len++] = '\0';
	BUILD_BUG_ON(KSYM_NAME_LEN % sizeof(u64));

	if (unregister)
		flags |= PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER;

	ksymbol_event = (struct perf_ksymbol_event){
		.name = name,
		.name_len = name_len,
		.event_id = {
			.header = {
				.type = PERF_RECORD_KSYMBOL,
				.size = sizeof(ksymbol_event.event_id) +
					name_len,
			},
			.addr = addr,
			.len = len,
			.ksym_type = ksym_type,
			.flags = flags,
		},
	};

	perf_iterate_sb(perf_event_ksymbol_output, &ksymbol_event, NULL);
	return;
err:
	WARN_ONCE(1, "%s: Invalid KSYMBOL type 0x%x\n", __func__, ksym_type);
}

/*
 * bpf program load/unload tracking
 */

struct perf_bpf_event {
	struct bpf_prog	*prog;
	struct {
		struct perf_event_header	header;
		u16				type;
		u16				flags;
		u32				id;
		u8				tag[BPF_TAG_SIZE];
	} event_id;
};

static int perf_event_bpf_match(struct perf_event *event)
{
	return event->attr.bpf_event;
}

static void perf_event_bpf_output(struct perf_event *event, void *data)
{
	struct perf_bpf_event *bpf_event = data;
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int ret;

	if (!perf_event_bpf_match(event))
		return;

	perf_event_header__init_id(&bpf_event->event_id.header,
				   &sample, event);
	ret = perf_output_begin(&handle, event,
				bpf_event->event_id.header.size);
	if (ret)
		return;

	perf_output_put(&handle, bpf_event->event_id);
	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
}

static void perf_event_bpf_emit_ksymbols(struct bpf_prog *prog,
					 enum perf_bpf_event_type type)
{
	bool unregister = type == PERF_BPF_EVENT_PROG_UNLOAD;
	char sym[KSYM_NAME_LEN];
	int i;

	if (prog->aux->func_cnt == 0) {
		bpf_get_prog_name(prog, sym);
		perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF,
				   (u64)(unsigned long)prog->bpf_func,
				   prog->jited_len, unregister, sym);
	} else {
		for (i = 0; i < prog->aux->func_cnt; i++) {
			struct bpf_prog *subprog = prog->aux->func[i];

			bpf_get_prog_name(subprog, sym);
			perf_event_ksymbol(
				PERF_RECORD_KSYMBOL_TYPE_BPF,
				(u64)(unsigned long)subprog->bpf_func,
				subprog->jited_len, unregister, sym);
		}
	}
}

void perf_event_bpf_event(struct bpf_prog *prog,
			  enum perf_bpf_event_type type,
			  u16 flags)
{
	struct perf_bpf_event bpf_event;

	if (type <= PERF_BPF_EVENT_UNKNOWN ||
	    type >= PERF_BPF_EVENT_MAX)
		return;

	switch (type) {
	case PERF_BPF_EVENT_PROG_LOAD:
	case PERF_BPF_EVENT_PROG_UNLOAD:
		if (atomic_read(&nr_ksymbol_events))
			perf_event_bpf_emit_ksymbols(prog, type);
		break;
	default:
		break;
	}

	if (!atomic_read(&nr_bpf_events))
		return;

	bpf_event = (struct perf_bpf_event){
		.prog = prog,
		.event_id = {
			.header = {
				.type = PERF_RECORD_BPF_EVENT,
				.size = sizeof(bpf_event.event_id),
			},
			.type = type,
			.flags = flags,
			.id = prog->aux->id,
		},
	};

	BUILD_BUG_ON(BPF_TAG_SIZE % sizeof(u64));

	memcpy(bpf_event.event_id.tag, prog->tag, BPF_TAG_SIZE);
	perf_iterate_sb(perf_event_bpf_output, &bpf_event, NULL);
}

void perf_event_itrace_started(struct perf_event *event)
{
	event->attach_state |= PERF_ATTACH_ITRACE;

@@ -9909,6 +10119,10 @@ static void account_event(struct perf_event *event)
		inc = true;
	if (is_cgroup_event(event))
		inc = true;
	if (event->attr.ksymbol)
		atomic_inc(&nr_ksymbol_events);
	if (event->attr.bpf_event)
		atomic_inc(&nr_bpf_events);

	if (inc) {
		/*
@@ -494,7 +494,7 @@ static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter)

static int get_ksymbol_bpf(struct kallsym_iter *iter)
{
	iter->module_name[0] = '\0';
	strlcpy(iter->module_name, "bpf", MODULE_NAME_LEN);
	iter->exported = 0;
	return bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end,
			       &iter->value, &iter->type,
@@ -431,8 +431,7 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	perf_event_output(event, sd, regs);
	return 0;
	return perf_event_output(event, sd, regs);
}

BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
@@ -2343,7 +2343,7 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he)
	struct c2c_cacheline_browser *cl_browser;
	struct hist_browser *browser;
	int key = -1;
	const char help[] =
	static const char help[] =
	" ENTER         Toggle callchains (if present) \n"
	" n             Toggle Node details info \n"
	" s             Toggle full length of symbol and source line columns \n"

@@ -2424,7 +2424,7 @@ static int perf_c2c__hists_browse(struct hists *hists)
{
	struct hist_browser *browser;
	int key = -1;
	const char help[] =
	static const char help[] =
	" d             Display cacheline details \n"
	" ENTER         Toggle callchains (if present) \n"
	" q             Quit \n";
@@ -334,7 +334,7 @@ static int build_alloc_func_list(void)
	struct alloc_func *func;
	struct machine *machine = &kmem_session->machines.host;
	regex_t alloc_func_regex;
	const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";
	static const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";

	ret = regcomp(&alloc_func_regex, pattern, REG_EXTENDED);
	if (ret) {

@@ -1924,7 +1924,7 @@ int cmd_kmem(int argc, const char **argv)
		NULL
	};
	struct perf_session *session;
	const char errmsg[] = "No %s allocation events found.  Have you run 'perf kmem record --%s'?\n";
	static const char errmsg[] = "No %s allocation events found.  Have you run 'perf kmem record --%s'?\n";
	int ret = perf_config(kmem_config, NULL);

	if (ret)
@@ -41,6 +41,7 @@
#include "util/perf-hooks.h"
#include "util/time-utils.h"
#include "util/units.h"
#include "util/bpf-event.h"
#include "asm/bug.h"

#include <errno.h>

@@ -1082,6 +1083,11 @@ static int record__synthesize(struct record *rec, bool tail)
		return err;
	}

	err = perf_event__synthesize_bpf_events(tool, process_synthesized_event,
						machine, opts);
	if (err < 0)
		pr_warning("Couldn't synthesize bpf events.\n");

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address,
					    1);

@@ -1839,6 +1845,7 @@ static struct option __record_options[] = {
	OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
		    "synthesize non-sample events at the end of output"),
	OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
	OPT_BOOLEAN(0, "bpf-event", &record.opts.bpf_event, "record bpf events"),
	OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
		    "Fail if the specified frequency can't be used"),
	OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
@@ -956,9 +956,9 @@ int cmd_report(int argc, const char **argv)
	int branch_mode = -1;
	bool branch_call_mode = false;
#define CALLCHAIN_DEFAULT_OPT  "graph,0.5,caller,function,percent"
	const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
					     CALLCHAIN_REPORT_HELP
					     "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
	static const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
						    CALLCHAIN_REPORT_HELP
						    "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
	char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT;
	const char * const report_usage[] = {
		"perf report [<options>]",

@@ -3336,7 +3336,7 @@ static int __cmd_record(int argc, const char **argv)

int cmd_sched(int argc, const char **argv)
{
	const char default_sort_order[] = "avg, max, switch, runtime";
	static const char default_sort_order[] = "avg, max, switch, runtime";
	struct perf_sched sched = {
		.tool = {
			.sample		 = perf_sched__process_tracepoint_sample,
@@ -83,7 +83,6 @@
#include <unistd.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/wait.h>

#include "sane_ctype.h"

@@ -22,6 +22,7 @@
#include "perf.h"

#include "util/annotate.h"
#include "util/bpf-event.h"
#include "util/config.h"
#include "util/color.h"
#include "util/drv_configs.h"

@@ -1215,6 +1216,12 @@ static int __cmd_top(struct perf_top *top)

	init_process_thread(top);

	ret = perf_event__synthesize_bpf_events(&top->tool, perf_event__process,
						&top->session->machines.host,
						&top->record_opts);
	if (ret < 0)
		pr_warning("Couldn't synthesize bpf events.\n");

	machine__synthesize_threads(&top->session->machines.host, &opts->target,
				    top->evlist->threads, false,
				    top->nr_threads_synthesize);
@@ -141,8 +141,8 @@ int sys_enter(struct syscall_enter_args *args)
		len = sizeof(augmented_args.args);
	}

	perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, &augmented_args, len);
	return 0;
	/* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */
	return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, &augmented_args, len);
}

SEC("raw_syscalls:sys_exit")

@@ -55,9 +55,9 @@ int syscall_enter(syscall)(struct syscall_enter_##syscall##_args *args)	\
		len -= sizeof(augmented_args.filename.value) - augmented_args.filename.size; \
		len &= sizeof(augmented_args.filename.value) - 1;		\
	}									\
	perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU,	\
			  &augmented_args, len);				\
	return 0;								\
	/* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */ \
	return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
				 &augmented_args, len);				\
}										\
int syscall_exit(syscall)(struct syscall_exit_args *args)			\
{										\

@@ -125,10 +125,10 @@ int syscall_enter(syscall)(struct syscall_enter_##syscall##_args *args)	\
	/* addrlen = augmented_args.args.addrlen; */				\
	/* */									\
	probe_read(&augmented_args.addr, addrlen, args->addr_ptr);		\
	perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU,	\
			  &augmented_args,					\
			  sizeof(augmented_args) - sizeof(augmented_args.addr) + addrlen); \
	return 0;								\
	/* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */ \
	return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
				 &augmented_args,				\
				 sizeof(augmented_args) - sizeof(augmented_args.addr) + addrlen);\
}										\
int syscall_exit(syscall)(struct syscall_exit_args *args)			\
{										\

@@ -49,11 +49,11 @@ int syscall_enter(syscall)(struct syscall_enter_##syscall##_args *args)	\
				      args->filename_ptr);			\
	if (__builtin_memcmp(augmented_args.filename.value, etc, 4) != 0)	\
		return 0;							\
	perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU,	\
			  &augmented_args,					\
			  (sizeof(augmented_args) - sizeof(augmented_args.filename.value) + \
			   augmented_args.filename.size));			\
	return 0;								\
	/* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */ \
	return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
				 &augmented_args,				\
				 (sizeof(augmented_args) - sizeof(augmented_args.filename.value) + \
				  augmented_args.filename.size));		\
}

struct syscall_enter_openat_args {
@@ -66,6 +66,7 @@ struct record_opts {
	bool	     ignore_missing_thread;
	bool	     strict_freq;
	bool	     sample_id;
	bool	     bpf_event;
	unsigned int freq;
	unsigned int mmap_pages;
	unsigned int auxtrace_mmap_pages;

@@ -15,7 +15,6 @@
#include <sys/mman.h>
#include <linux/compiler.h>
#include <linux/hw_breakpoint.h>
#include <sys/ioctl.h>

#include "tests.h"
#include "debug.h"

@@ -35,7 +35,7 @@ static int list_menu__run(struct ui_browser *menu)
{
	int key;
	unsigned long offset;
	const char help[] =
	static const char help[] =
	"h/?/F1        Show this window\n"
	"UP/DOWN/PGUP\n"
	"PGDN/SPACE\n"

@@ -2748,7 +2748,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
	"S             Zoom into current Processor Socket\n" \

	/* help messages are sorted by lexical order of the hotkey */
	const char report_help[] = HIST_BROWSER_HELP_COMMON
	static const char report_help[] = HIST_BROWSER_HELP_COMMON
	"i             Show header information\n"
	"P             Print histograms to perf.hist.N\n"
	"r             Run available scripts\n"

@@ -2756,7 +2756,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
	"t             Zoom into current Thread\n"
	"V             Verbose (DSO names in callchains, etc)\n"
	"/             Filter symbol by name";
	const char top_help[] = HIST_BROWSER_HELP_COMMON
	static const char top_help[] = HIST_BROWSER_HELP_COMMON
	"P             Print histograms to perf.hist.N\n"
	"t             Zoom into current Thread\n"
	"V             Verbose (DSO names in callchains, etc)\n"

@@ -35,6 +35,7 @@ libperf-y += dso.o
libperf-y += symbol.o
libperf-y += symbol_fprintf.o
libperf-y += color.o
libperf-y += color_config.o
libperf-y += metricgroup.o
libperf-y += header.o
libperf-y += callchain.o

@@ -44,6 +45,8 @@ libperf-y += machine.o
libperf-y += map.o
libperf-y += pstack.o
libperf-y += session.o
libperf-y += sample-raw.o
libperf-y += s390-sample-raw.o
libperf-$(CONFIG_TRACE) += syscalltbl.o
libperf-y += ordered-events.o
libperf-y += namespaces.o

@@ -152,6 +155,8 @@ endif

libperf-y += perf-hooks.o

libperf-$(CONFIG_LIBBPF) += bpf-event.o

libperf-$(CONFIG_CXX) += c++/

CFLAGS_config.o += -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
tools/perf/util/bpf-event.c (new file, 257 lines)
@@ -0,0 +1,257 @@
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <linux/btf.h>
#include "bpf-event.h"
#include "debug.h"
#include "symbol.h"

#define ptr_to_u64(ptr)    ((__u64)(unsigned long)(ptr))

static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
{
	int ret = 0;
	size_t i;

	for (i = 0; i < len; i++)
		ret += snprintf(buf + ret, size - ret, "%02x", data[i]);
	return ret;
}

int machine__process_bpf_event(struct machine *machine __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_bpf_event(event, stdout);
	return 0;
}

/*
 * Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
 * program. One PERF_RECORD_BPF_EVENT is generated for the program. And
 * one PERF_RECORD_KSYMBOL is generated for each sub program.
 *
 * Returns:
 *    0 for success;
 *   -1 for failures;
 *   -2 for lack of kernel support.
 */
static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
					       perf_event__handler_t process,
					       struct machine *machine,
					       int fd,
					       union perf_event *event,
					       struct record_opts *opts)
{
	struct ksymbol_event *ksymbol_event = &event->ksymbol_event;
	struct bpf_event *bpf_event = &event->bpf_event;
	u32 sub_prog_cnt, i, func_info_rec_size = 0;
	u8 (*prog_tags)[BPF_TAG_SIZE] = NULL;
	struct bpf_prog_info info = { .type = 0, };
	u32 info_len = sizeof(info);
	void *func_infos = NULL;
	u64 *prog_addrs = NULL;
	struct btf *btf = NULL;
	u32 *prog_lens = NULL;
	bool has_btf = false;
	char errbuf[512];
	int err = 0;

	/* Call bpf_obj_get_info_by_fd() to get sizes of arrays */
	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);

	if (err) {
		pr_debug("%s: failed to get BPF program info: %s, aborting\n",
			 __func__, str_error_r(errno, errbuf, sizeof(errbuf)));
		return -1;
	}
	if (info_len < offsetof(struct bpf_prog_info, prog_tags)) {
		pr_debug("%s: the kernel is too old, aborting\n", __func__);
		return -2;
	}

	/* number of ksyms, func_lengths, and tags should match */
	sub_prog_cnt = info.nr_jited_ksyms;
	if (sub_prog_cnt != info.nr_prog_tags ||
	    sub_prog_cnt != info.nr_jited_func_lens)
		return -1;

	/* check BTF func info support */
	if (info.btf_id && info.nr_func_info && info.func_info_rec_size) {
		/* btf func info number should be same as sub_prog_cnt */
		if (sub_prog_cnt != info.nr_func_info) {
			pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
			return -1;
		}
		if (btf__get_from_id(info.btf_id, &btf)) {
			pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info.btf_id);
			return -1;
		}
		func_info_rec_size = info.func_info_rec_size;
		func_infos = calloc(sub_prog_cnt, func_info_rec_size);
		if (!func_infos) {
			pr_debug("%s: failed to allocate memory for func_infos, aborting\n", __func__);
			return -1;
		}
		has_btf = true;
	}

	/*
	 * We need address, length, and tag for each sub program.
	 * Allocate memory and call bpf_obj_get_info_by_fd() again
	 */
	prog_addrs = calloc(sub_prog_cnt, sizeof(u64));
	if (!prog_addrs) {
		pr_debug("%s: failed to allocate memory for prog_addrs, aborting\n", __func__);
		goto out;
	}
	prog_lens = calloc(sub_prog_cnt, sizeof(u32));
	if (!prog_lens) {
		pr_debug("%s: failed to allocate memory for prog_lens, aborting\n", __func__);
		goto out;
	}
	prog_tags = calloc(sub_prog_cnt, BPF_TAG_SIZE);
	if (!prog_tags) {
		pr_debug("%s: failed to allocate memory for prog_tags, aborting\n", __func__);
		goto out;
	}

	memset(&info, 0, sizeof(info));
	info.nr_jited_ksyms = sub_prog_cnt;
	info.nr_jited_func_lens = sub_prog_cnt;
	info.nr_prog_tags = sub_prog_cnt;
	info.jited_ksyms = ptr_to_u64(prog_addrs);
	info.jited_func_lens = ptr_to_u64(prog_lens);
	info.prog_tags = ptr_to_u64(prog_tags);
	info_len = sizeof(info);
	if (has_btf) {
		info.nr_func_info = sub_prog_cnt;
		info.func_info_rec_size = func_info_rec_size;
		info.func_info = ptr_to_u64(func_infos);
	}

	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
	if (err) {
		pr_debug("%s: failed to get BPF program info, aborting\n", __func__);
		goto out;
	}

	/* Synthesize PERF_RECORD_KSYMBOL */
	for (i = 0; i < sub_prog_cnt; i++) {
		const struct bpf_func_info *finfo;
		const char *short_name = NULL;
		const struct btf_type *t;
		int name_len;

		*ksymbol_event = (struct ksymbol_event){
			.header = {
				.type = PERF_RECORD_KSYMBOL,
				.size = sizeof(struct ksymbol_event),
			},
			.addr = prog_addrs[i],
			.len = prog_lens[i],
			.ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
			.flags = 0,
		};
		name_len = snprintf(ksymbol_event->name, KSYM_NAME_LEN,
				    "bpf_prog_");
		name_len += snprintf_hex(ksymbol_event->name + name_len,
					 KSYM_NAME_LEN - name_len,
					 prog_tags[i], BPF_TAG_SIZE);
		if (has_btf) {
			finfo = func_infos + i * info.func_info_rec_size;
			t = btf__type_by_id(btf, finfo->type_id);
			short_name = btf__name_by_offset(btf, t->name_off);
		} else if (i == 0 && sub_prog_cnt == 1) {
			/* no subprog */
			if (info.name[0])
				short_name = info.name;
		} else
			short_name = "F";
		if (short_name)
			name_len += snprintf(ksymbol_event->name + name_len,
					     KSYM_NAME_LEN - name_len,
					     "_%s", short_name);

		ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
							 sizeof(u64));
		err = perf_tool__process_synth_event(tool, event,
						     machine, process);
	}

	/* Synthesize PERF_RECORD_BPF_EVENT */
	if (opts->bpf_event) {
		*bpf_event = (struct bpf_event){
			.header = {
				.type = PERF_RECORD_BPF_EVENT,
				.size = sizeof(struct bpf_event),
			},
			.type = PERF_BPF_EVENT_PROG_LOAD,
			.flags = 0,
			.id = info.id,
		};
		memcpy(bpf_event->tag, prog_tags[i], BPF_TAG_SIZE);
		err = perf_tool__process_synth_event(tool, event,
						     machine, process);
	}

out:
	free(prog_tags);
	free(prog_lens);
	free(prog_addrs);
	free(func_infos);
	free(btf);
	return err ? -1 : 0;
}

int perf_event__synthesize_bpf_events(struct perf_tool *tool,
				      perf_event__handler_t process,
				      struct machine *machine,
				      struct record_opts *opts)
{
	union perf_event *event;
	__u32 id = 0;
	int err;
	int fd;

	event = malloc(sizeof(event->bpf_event) + KSYM_NAME_LEN);
	if (!event)
		return -1;
	while (true) {
		err = bpf_prog_get_next_id(id, &id);
		if (err) {
			if (errno == ENOENT) {
				err = 0;
				break;
			}
			pr_debug("%s: can't get next program: %s%s",
				 __func__, strerror(errno),
				 errno == EINVAL ? " -- kernel too old?" : "");
			/* don't report error on old kernel */
			err = (errno == EINVAL) ? 0 : -1;
			break;
		}
		fd = bpf_prog_get_fd_by_id(id);
		if (fd < 0) {
			pr_debug("%s: failed to get fd for prog_id %u\n",
				 __func__, id);
			continue;
		}

		err = perf_event__synthesize_one_bpf_prog(tool, process,
							  machine, fd,
							  event, opts);
		close(fd);
		if (err) {
			/* do not return error for old kernel */
			if (err == -2)
				err = 0;
			break;
		}
	}
	free(event);
	return err;
}
tools/perf/util/bpf-event.h (new file, 38 lines)
@@ -0,0 +1,38 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_BPF_EVENT_H
#define __PERF_BPF_EVENT_H

#include <linux/compiler.h>
#include "event.h"

struct machine;
union perf_event;
struct perf_sample;
struct perf_tool;
struct record_opts;

#ifdef HAVE_LIBBPF_SUPPORT
int machine__process_bpf_event(struct machine *machine, union perf_event *event,
			       struct perf_sample *sample);

int perf_event__synthesize_bpf_events(struct perf_tool *tool,
				      perf_event__handler_t process,
				      struct machine *machine,
				      struct record_opts *opts);
#else
static inline int machine__process_bpf_event(struct machine *machine __maybe_unused,
					     union perf_event *event __maybe_unused,
					     struct perf_sample *sample __maybe_unused)
{
	return 0;
}

static inline int perf_event__synthesize_bpf_events(struct perf_tool *tool __maybe_unused,
						    perf_event__handler_t process __maybe_unused,
						    struct machine *machine __maybe_unused,
						    struct record_opts *opts __maybe_unused)
{
	return 0;
}
#endif // HAVE_LIBBPF_SUPPORT
#endif
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include "cache.h"
#include "config.h"
#include <stdlib.h>
#include <stdio.h>
#include "color.h"

@@ -10,44 +9,6 @@

int perf_use_color_default = -1;

int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty)
{
	if (value) {
		if (!strcasecmp(value, "never"))
			return 0;
		if (!strcasecmp(value, "always"))
			return 1;
		if (!strcasecmp(value, "auto"))
			goto auto_color;
	}

	/* Missing or explicit false to turn off colorization */
	if (!perf_config_bool(var, value))
		return 0;

	/* any normal truth value defaults to 'auto' */
auto_color:
	if (stdout_is_tty < 0)
		stdout_is_tty = isatty(1);
	if (stdout_is_tty || pager_in_use()) {
		char *term = getenv("TERM");
		if (term && strcmp(term, "dumb"))
			return 1;
	}
	return 0;
}

int perf_color_default_config(const char *var, const char *value,
			      void *cb __maybe_unused)
{
	if (!strcmp(var, "color.ui")) {
		perf_use_color_default = perf_config_colorbool(var, value, -1);
		return 0;
	}

	return 0;
}

static int __color_vsnprintf(char *bf, size_t size, const char *color,
			     const char *fmt, va_list args, const char *trail)
{
tools/perf/util/color_config.c (new file, 47 lines)
@@ -0,0 +1,47 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include "cache.h"
#include "config.h"
#include <stdlib.h>
#include <stdio.h>
#include "color.h"
#include <math.h>
#include <unistd.h>

int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty)
{
	if (value) {
		if (!strcasecmp(value, "never"))
			return 0;
		if (!strcasecmp(value, "always"))
			return 1;
		if (!strcasecmp(value, "auto"))
			goto auto_color;
	}

	/* Missing or explicit false to turn off colorization */
	if (!perf_config_bool(var, value))
		return 0;

	/* any normal truth value defaults to 'auto' */
auto_color:
	if (stdout_is_tty < 0)
		stdout_is_tty = isatty(1);
	if (stdout_is_tty || pager_in_use()) {
		char *term = getenv("TERM");
		if (term && strcmp(term, "dumb"))
			return 1;
	}
	return 0;
}

int perf_color_default_config(const char *var, const char *value,
			      void *cb __maybe_unused)
{
	if (!strcmp(var, "color.ui")) {
		perf_use_color_default = perf_config_colorbool(var, value, -1);
		return 0;
	}

	return 0;
}
@@ -8,7 +8,6 @@
#include <sys/types.h>
#include <stdbool.h>
#include "rwsem.h"
#include <linux/types.h>
#include <linux/bitops.h>
#include "map.h"
#include "namespaces.h"

@@ -24,6 +24,8 @@
#include "symbol/kallsyms.h"
#include "asm/bug.h"
#include "stat.h"
#include "session.h"
#include "bpf-event.h"

#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500

@@ -45,6 +47,8 @@ static const char *perf_event__names[] = {
	[PERF_RECORD_SWITCH]			= "SWITCH",
	[PERF_RECORD_SWITCH_CPU_WIDE]		= "SWITCH_CPU_WIDE",
	[PERF_RECORD_NAMESPACES]		= "NAMESPACES",
	[PERF_RECORD_KSYMBOL]			= "KSYMBOL",
	[PERF_RECORD_BPF_EVENT]			= "BPF_EVENT",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",

@@ -1329,6 +1333,22 @@ int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
	return machine__process_switch_event(machine, event);
}

int perf_event__process_ksymbol(struct perf_tool *tool __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample __maybe_unused,
				struct machine *machine)
{
	return machine__process_ksymbol(machine, event, sample);
}

int perf_event__process_bpf_event(struct perf_tool *tool __maybe_unused,
				  union perf_event *event,
				  struct perf_sample *sample __maybe_unused,
				  struct machine *machine)
{
	return machine__process_bpf_event(machine, event, sample);
}

size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",

@@ -1461,6 +1481,21 @@ static size_t perf_event__fprintf_lost(union perf_event *event, FILE *fp)
	return fprintf(fp, " lost %" PRIu64 "\n", event->lost.lost);
}

size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " ksymbol event with addr %" PRIx64 " len %u type %u flags 0x%x name %s\n",
		       event->ksymbol_event.addr, event->ksymbol_event.len,
		       event->ksymbol_event.ksym_type,
		       event->ksymbol_event.flags, event->ksymbol_event.name);
}

size_t perf_event__fprintf_bpf_event(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " bpf event with type %u, flags %u, id %u\n",
		       event->bpf_event.type, event->bpf_event.flags,
		       event->bpf_event.id);
}

size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",

@@ -1496,6 +1531,12 @@ size_t perf_event__fprintf(union perf_event *event, FILE *fp)
	case PERF_RECORD_LOST:
		ret += perf_event__fprintf_lost(event, fp);
		break;
	case PERF_RECORD_KSYMBOL:
		ret += perf_event__fprintf_ksymbol(event, fp);
		break;
	case PERF_RECORD_BPF_EVENT:
		ret += perf_event__fprintf_bpf_event(event, fp);
		break;
	default:
		ret += fprintf(fp, "\n");
	}

@@ -5,6 +5,7 @@
#include <limits.h>
#include <stdio.h>
#include <linux/kernel.h>
#include <linux/bpf.h>

#include "../perf.h"
#include "build-id.h"

@@ -84,6 +85,29 @@ struct throttle_event {
	u64 stream_id;
};

#ifndef KSYM_NAME_LEN
#define KSYM_NAME_LEN 256
#endif

struct ksymbol_event {
	struct perf_event_header header;
	u64 addr;
	u32 len;
	u16 ksym_type;
	u16 flags;
	char name[KSYM_NAME_LEN];
};

struct bpf_event {
	struct perf_event_header header;
	u16 type;
	u16 flags;
	u32 id;

	/* for bpf_prog types */
	u8 tag[BPF_TAG_SIZE];  // prog tag
};

#define PERF_SAMPLE_MASK				\
	(PERF_SAMPLE_IP | PERF_SAMPLE_TID |		\
	 PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR |		\

@@ -651,6 +675,8 @@ union perf_event {
	struct stat_round_event		stat_round;
	struct time_conv_event		time_conv;
	struct feature_event		feat;
	struct ksymbol_event		ksymbol_event;
	struct bpf_event		bpf_event;
};

void perf_event__print_totals(void);

@@ -748,6 +774,14 @@ int perf_event__process_exit(struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_event__process_ksymbol(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine);
int perf_event__process_bpf_event(struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_sample *sample,
				  struct machine *machine);
int perf_tool__process_synth_event(struct perf_tool *tool,
				   union perf_event *event,
				   struct machine *machine,

@@ -811,6 +845,8 @@ size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_bpf_event(union perf_event *event, FILE *fp);
size_t perf_event__fprintf(union perf_event *event, FILE *fp);

int kallsyms__get_function_start(const char *kallsyms_filename,

@@ -49,6 +49,9 @@ struct perf_evlist {
	struct perf_evsel *selected;
	struct events_stats stats;
	struct perf_env	*env;
	void (*trace_event_sample_raw)(struct perf_evlist *evlist,
				       union perf_event *event,
				       struct perf_sample *sample);
	u64		first_sample_time;
	u64		last_sample_time;
};

@@ -314,5 +317,4 @@ void perf_evlist__force_leader(struct perf_evlist *evlist);

struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evlist,
						 struct perf_evsel *evsel);

#endif /* __PERF_EVLIST_H */
@@ -1035,6 +1035,9 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
		attr->mmap  = track;
		attr->mmap2 = track && !perf_missing_features.mmap2;
		attr->comm  = track;
		attr->ksymbol = track && !perf_missing_features.ksymbol;
		attr->bpf_event = track && opts->bpf_event &&
			!perf_missing_features.bpf_event;

	if (opts->record_namespaces)
		attr->namespaces  = track;

@@ -1652,6 +1655,8 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
	PRINT_ATTRf(context_switch, p_unsigned);
	PRINT_ATTRf(write_backward, p_unsigned);
	PRINT_ATTRf(namespaces, p_unsigned);
	PRINT_ATTRf(ksymbol, p_unsigned);
	PRINT_ATTRf(bpf_event, p_unsigned);

	PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
	PRINT_ATTRf(bp_type, p_unsigned);

@@ -1811,6 +1816,10 @@ fallback_missing_features:
				     PERF_SAMPLE_BRANCH_NO_CYCLES);
	if (perf_missing_features.group_read && evsel->attr.inherit)
		evsel->attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
	if (perf_missing_features.ksymbol)
		evsel->attr.ksymbol = 0;
	if (perf_missing_features.bpf_event)
		evsel->attr.bpf_event = 0;
retry_sample_id:
	if (perf_missing_features.sample_id_all)
		evsel->attr.sample_id_all = 0;

@@ -1930,7 +1939,15 @@ try_fallback:
	 * Must probe features in the order they were added to the
	 * perf_event_attr interface.
	 */
	if (!perf_missing_features.write_backward && evsel->attr.write_backward) {
	if (!perf_missing_features.bpf_event && evsel->attr.bpf_event) {
		perf_missing_features.bpf_event = true;
		pr_debug2("switching off bpf_event\n");
		goto fallback_missing_features;
	} else if (!perf_missing_features.ksymbol && evsel->attr.ksymbol) {
		perf_missing_features.ksymbol = true;
		pr_debug2("switching off ksymbol\n");
		goto fallback_missing_features;
	} else if (!perf_missing_features.write_backward && evsel->attr.write_backward) {
		perf_missing_features.write_backward = true;
		pr_debug2("switching off write_backward\n");
		goto out_close;

@@ -168,6 +168,8 @@ struct perf_missing_features {
	bool lbr_flags;
	bool write_backward;
	bool group_read;
	bool ksymbol;
	bool bpf_event;
};

extern struct perf_missing_features perf_missing_features;
@@ -21,6 +21,7 @@
#include "unwind.h"
#include "linux/hash.h"
#include "asm/bug.h"
#include "bpf-event.h"

#include "sane_ctype.h"
#include <symbol/kallsyms.h>

@@ -681,6 +682,59 @@ int machine__process_switch_event(struct machine *machine __maybe_unused,
	return 0;
}

static int machine__process_ksymbol_register(struct machine *machine,
					      union perf_event *event,
					      struct perf_sample *sample __maybe_unused)
{
	struct symbol *sym;
	struct map *map;

	map = map_groups__find(&machine->kmaps, event->ksymbol_event.addr);
	if (!map) {
		map = dso__new_map(event->ksymbol_event.name);
		if (!map)
			return -ENOMEM;

		map->start = event->ksymbol_event.addr;
		map->pgoff = map->start;
		map->end = map->start + event->ksymbol_event.len;
		map_groups__insert(&machine->kmaps, map);
	}

	sym = symbol__new(event->ksymbol_event.addr, event->ksymbol_event.len,
			  0, 0, event->ksymbol_event.name);
	if (!sym)
		return -ENOMEM;
	dso__insert_symbol(map->dso, sym);
	return 0;
}

static int machine__process_ksymbol_unregister(struct machine *machine,
					       union perf_event *event,
					       struct perf_sample *sample __maybe_unused)
{
	struct map *map;

	map = map_groups__find(&machine->kmaps, event->ksymbol_event.addr);
	if (map)
		map_groups__remove(&machine->kmaps, map);

	return 0;
}

int machine__process_ksymbol(struct machine *machine __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample)
{
	if (dump_trace)
		perf_event__fprintf_ksymbol(event, stdout);

	if (event->ksymbol_event.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
		return machine__process_ksymbol_unregister(machine, event,
							   sample);
	return machine__process_ksymbol_register(machine, event, sample);
}

static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
{
	const char *dup_filename;

@@ -1812,6 +1866,10 @@ int machine__process_event(struct machine *machine, union perf_event *event,
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret = machine__process_switch_event(machine, event); break;
	case PERF_RECORD_KSYMBOL:
		ret = machine__process_ksymbol(machine, event, sample); break;
	case PERF_RECORD_BPF_EVENT:
		ret = machine__process_bpf_event(machine, event, sample); break;
	default:
		ret = -1;
		break;

@@ -130,6 +130,9 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
				struct perf_sample *sample);
int machine__process_mmap2_event(struct machine *machine, union perf_event *event,
				 struct perf_sample *sample);
int machine__process_ksymbol(struct machine *machine,
			     union perf_event *event,
			     struct perf_sample *sample);
int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample);
62
tools/perf/util/s390-cpumcf-kernel.h
Normal file
62
tools/perf/util/s390-cpumcf-kernel.h
Normal file
@ -0,0 +1,62 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Support for s390 CPU measurement counter set diagnostic facility
 *
 * Copyright IBM Corp. 2019
 * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
 *            Thomas Richter <tmricht@linux.ibm.com>
 */
#ifndef S390_CPUMCF_KERNEL_H
#define S390_CPUMCF_KERNEL_H

#define S390_CPUMCF_DIAG_DEF	0xfeef	/* Counter diagnostic entry ID */
#define PERF_EVENT_CPUM_CF_DIAG	0xBC000	/* Event: Counter sets */

struct cf_ctrset_entry {	/* CPU-M CF counter set entry (8 byte) */
	unsigned int def:16;	/* 0-15  Data Entry Format */
	unsigned int set:16;	/* 16-31 Counter set identifier */
	unsigned int ctr:16;	/* 32-47 Number of stored counters */
	unsigned int res1:16;	/* 48-63 Reserved */
};

struct cf_trailer_entry {	/* CPU-M CF trailer for raw traces (64 byte) */
	/* 0 - 7 */
	union {
		struct {
			unsigned int clock_base:1;	/* TOD clock base */
			unsigned int speed:1;		/* CPU speed */
			/* Measurement alerts */
			unsigned int mtda:1;	/* Loss of MT ctr. data alert */
			unsigned int caca:1;	/* Counter auth. change alert */
			unsigned int lcda:1;	/* Loss of counter data alert */
		};
		unsigned long flags;	/* 0-63 All indicators */
	};
	/* 8 - 15 */
	unsigned int cfvn:16;		/* 64-79   Ctr First Version */
	unsigned int csvn:16;		/* 80-95   Ctr Second Version */
	unsigned int cpu_speed:32;	/* 96-127  CPU speed */
	/* 16 - 23 */
	unsigned long timestamp;	/* 128-191 Timestamp (TOD) */
	/* 24 - 55 */
	union {
		struct {
			unsigned long progusage1;
			unsigned long progusage2;
			unsigned long progusage3;
			unsigned long tod_base;
		};
		unsigned long progusage[4];
	};
	/* 56 - 63 */
	unsigned int mach_type:16;	/* Machine type */
	unsigned int res1:16;		/* Reserved */
	unsigned int res2:32;		/* Reserved */
};

#define CPUMF_CTR_SET_BASIC	0	/* Basic Counter Set */
#define CPUMF_CTR_SET_USER	1	/* Problem-State Counter Set */
#define CPUMF_CTR_SET_CRYPTO	2	/* Crypto-Activity Counter Set */
#define CPUMF_CTR_SET_EXT	3	/* Extended Counter Set */
#define CPUMF_CTR_SET_MT_DIAG	4	/* MT-diagnostic Counter Set */
#endif

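For reference, a counter-set entry is the 8-byte header above followed by one big-endian 8-byte value per counter, so an entry announcing ctr counters occupies 8 + 8*ctr bytes; this is what ctrset_size() in s390-sample-raw.c computes. A minimal standalone sketch of that sizing arithmetic, using a host-order stand-in struct for illustration only:

#include <stdio.h>
#include <stdint.h>

/* Reduced stand-in for struct cf_ctrset_entry: four 16-bit fields = 8 bytes. */
struct ctrset_hdr {
	uint16_t def;	/* must be 0xfeef (S390_CPUMCF_DIAG_DEF) for a valid entry */
	uint16_t set;	/* counter set identifier */
	uint16_t ctr;	/* number of 8-byte counter values that follow */
	uint16_t res1;
};

static size_t ctrset_bytes(const struct ctrset_hdr *e)
{
	return sizeof(*e) + (size_t)e->ctr * sizeof(uint64_t);
}

int main(void)
{
	struct ctrset_hdr basic = { .def = 0xfeef, .set = 0, .ctr = 6 };

	/* 8 header bytes + 6 * 8 counter bytes = 56 bytes for this entry. */
	printf("entry size: %zu bytes\n", ctrset_bytes(&basic));
	return 0;
}
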
@ -162,6 +162,7 @@
#include "auxtrace.h"
#include "s390-cpumsf.h"
#include "s390-cpumsf-kernel.h"
#include "s390-cpumcf-kernel.h"
#include "config.h"

struct s390_cpumsf {
@ -184,8 +185,58 @@ struct s390_cpumsf_queue {
	struct auxtrace_buffer *buffer;
	int cpu;
	FILE *logfile;
	FILE *logfile_ctr;
};

/* Check if the raw data should be dumped to file. If this is the case and
 * the file to dump to has not been opened for writing, do so.
 *
 * Return 0 on success and greater than zero on error so processing continues.
 */
static int s390_cpumcf_dumpctr(struct s390_cpumsf *sf,
			       struct perf_sample *sample)
{
	struct s390_cpumsf_queue *sfq;
	struct auxtrace_queue *q;
	int rc = 0;

	if (!sf->use_logfile || sf->queues.nr_queues <= sample->cpu)
		return rc;

	q = &sf->queues.queue_array[sample->cpu];
	sfq = q->priv;
	if (!sfq)		/* Queue not yet allocated */
		return rc;

	if (!sfq->logfile_ctr) {
		char *name;

		rc = (sf->logdir)
			? asprintf(&name, "%s/aux.ctr.%02x",
				   sf->logdir, sample->cpu)
			: asprintf(&name, "aux.ctr.%02x", sample->cpu);
		if (rc > 0)
			sfq->logfile_ctr = fopen(name, "w");
		if (sfq->logfile_ctr == NULL) {
			pr_err("Failed to open counter set log file %s, "
			       "continue...\n", name);
			rc = 1;
		}
		free(name);
	}

	if (sfq->logfile_ctr) {
		/* See comment above for -4 */
		size_t n = fwrite(sample->raw_data, sample->raw_size - 4, 1,
				  sfq->logfile_ctr);
		if (n != 1) {
			pr_err("Failed to write counter set data\n");
			rc = 1;
		}
	}
	return rc;
}

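The dump file name above is per CPU: a two-digit hex CPU number appended to "aux.ctr.", optionally under the configured log directory. A minimal sketch of that naming, assuming only glibc's asprintf() (the helper name is made up for illustration):

#define _GNU_SOURCE		/* for asprintf() */
#include <stdio.h>
#include <stdlib.h>

/* Build "<logdir>/aux.ctr.<cpu>" when a log directory is configured,
 * "aux.ctr.<cpu>" otherwise, like the code above.  Caller frees. */
static char *ctr_dump_name(const char *logdir, int cpu)
{
	char *name = NULL;
	int rc = logdir ? asprintf(&name, "%s/aux.ctr.%02x", logdir, cpu)
			: asprintf(&name, "aux.ctr.%02x", cpu);

	return rc > 0 ? name : NULL;
}

int main(void)
{
	char *n = ctr_dump_name(NULL, 26);

	if (n) {
		printf("%s\n", n);	/* prints "aux.ctr.1a" */
		free(n);
	}
	return 0;
}
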
/* Display s390 CPU measurement facility basic-sampling data entry */
static bool s390_cpumsf_basic_show(const char *color, size_t pos,
				   struct hws_basic_entry *basic)
@ -792,7 +843,7 @@ static int s390_cpumsf_lost(struct s390_cpumsf *sf, struct perf_sample *sample)
}

static int
s390_cpumsf_process_event(struct perf_session *session __maybe_unused,
s390_cpumsf_process_event(struct perf_session *session,
			  union perf_event *event,
			  struct perf_sample *sample,
			  struct perf_tool *tool)
@ -801,6 +852,8 @@ s390_cpumsf_process_event(struct perf_session *session __maybe_unused,
						       struct s390_cpumsf,
						       auxtrace);
	u64 timestamp = sample->time;
	struct perf_evsel *ev_bc000;

	int err = 0;

	if (dump_trace)
@ -811,6 +864,16 @@ s390_cpumsf_process_event(struct perf_session *session __maybe_unused,
		return -EINVAL;
	}

	if (event->header.type == PERF_RECORD_SAMPLE &&
	    sample->raw_size) {
		/* Handle event with raw data */
		ev_bc000 = perf_evlist__event2evsel(session->evlist, event);
		if (ev_bc000 &&
		    ev_bc000->attr.config == PERF_EVENT_CPUM_CF_DIAG)
			err = s390_cpumcf_dumpctr(sf, sample);
		return err;
	}

	if (event->header.type == PERF_RECORD_AUX &&
	    event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
		return s390_cpumsf_lost(sf, sample);
@ -891,9 +954,15 @@ static void s390_cpumsf_free_queues(struct perf_session *session)
		struct s390_cpumsf_queue *sfq = (struct s390_cpumsf_queue *)
						queues->queue_array[i].priv;

		if (sfq != NULL && sfq->logfile) {
			fclose(sfq->logfile);
			sfq->logfile = NULL;
		if (sfq != NULL) {
			if (sfq->logfile) {
				fclose(sfq->logfile);
				sfq->logfile = NULL;
			}
			if (sfq->logfile_ctr) {
				fclose(sfq->logfile_ctr);
				sfq->logfile_ctr = NULL;
			}
		}
		zfree(&queues->queue_array[i].priv);
	}
222 tools/perf/util/s390-sample-raw.c Normal file
@ -0,0 +1,222 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2019
 * Author(s): Thomas Richter <tmricht@linux.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Architecture specific trace_event function. Save event's bc000 raw data
 * to file. File name is aux.ctr.## where ## stands for the CPU number the
 * sample was taken from.
 */

#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include <sys/stat.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>

#include "debug.h"
#include "util.h"
#include "auxtrace.h"
#include "session.h"
#include "evlist.h"
#include "config.h"
#include "color.h"
#include "sample-raw.h"
#include "s390-cpumcf-kernel.h"
#include "pmu-events/pmu-events.h"

static size_t ctrset_size(struct cf_ctrset_entry *set)
{
	return sizeof(*set) + set->ctr * sizeof(u64);
}

static bool ctrset_valid(struct cf_ctrset_entry *set)
{
	return set->def == S390_CPUMCF_DIAG_DEF;
}

/* CPU Measurement Counter Facility raw data is a byte stream. It is 8 byte
 * aligned and might have trailing padding bytes.
 * Display the raw data on screen.
 */
static bool s390_cpumcfdg_testctr(struct perf_sample *sample)
{
	size_t len = sample->raw_size, offset = 0;
	unsigned char *buf = sample->raw_data;
	struct cf_trailer_entry *te;
	struct cf_ctrset_entry *cep, ce;

	if (!len)
		return false;
	while (offset < len) {
		cep = (struct cf_ctrset_entry *)(buf + offset);
		ce.def = be16_to_cpu(cep->def);
		ce.set = be16_to_cpu(cep->set);
		ce.ctr = be16_to_cpu(cep->ctr);
		ce.res1 = be16_to_cpu(cep->res1);

		if (!ctrset_valid(&ce) || offset + ctrset_size(&ce) > len) {
			/* Raw data for counter sets are always a multiple of
			 * 8 bytes. Prepending a 4 bytes size field to the
			 * raw data block in the sample causes the perf tool
			 * to append 4 padding bytes to make the raw data part
			 * of the sample a multiple of eight bytes again.
			 *
			 * If the last entry (trailer) is 4 bytes off the raw
			 * area data end, all is good.
			 */
			if (len - offset - sizeof(*te) == 4)
				break;
			pr_err("Invalid counter set entry at %zd\n", offset);
			return false;
		}
		offset += ctrset_size(&ce);
	}
	return true;
}

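A worked example of the padding check above: the kernel emits counter-set records that are a multiple of 8 bytes, perf prepends a 4-byte size field and then pads the raw area back to 8-byte alignment, so a well-formed sample ends with the 64-byte trailer followed by exactly 4 pad bytes, which is what len - offset - sizeof(*te) == 4 detects. A reduced, host-byte-order sketch of the same walk over a synthetic buffer (an illustration, not the tool code, which converts from big-endian):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct hdr { uint16_t def, set, ctr, res1; };	/* 8-byte entry header */
#define DIAG_DEF	0xfeef			/* valid entry marker  */
#define TRAILER		64			/* trailer size, bytes */

/* Return 1 when the buffer is a sequence of valid entries that ends with
 * a trailer positioned 4 pad bytes before the end of the raw area. */
static int testctr(const unsigned char *buf, size_t len)
{
	size_t off = 0;

	while (off < len) {
		struct hdr h;

		memcpy(&h, buf + off, sizeof(h));
		if (h.def != DIAG_DEF) {
			/* Not an entry: must be the trailer, 4 bytes short of the end. */
			return len - off - TRAILER == 4;
		}
		off += sizeof(h) + (size_t)h.ctr * sizeof(uint64_t);
	}
	return 0;
}

int main(void)
{
	/* One entry with 2 counters (8 + 16 = 24 bytes), trailer (64), pad (4). */
	unsigned char buf[24 + 64 + 4] = { 0 };
	struct hdr h = { .def = DIAG_DEF, .set = 0, .ctr = 2 };

	memcpy(buf, &h, sizeof(h));
	printf("valid: %d\n", testctr(buf, sizeof(buf)));	/* prints "valid: 1" */
	return 0;
}
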
/* Dump event bc000 on screen, already tested for correctness. */
static void s390_cpumcfdg_dumptrail(const char *color, size_t offset,
				    struct cf_trailer_entry *tep)
{
	struct cf_trailer_entry te;

	te.flags = be64_to_cpu(tep->flags);
	te.cfvn = be16_to_cpu(tep->cfvn);
	te.csvn = be16_to_cpu(tep->csvn);
	te.cpu_speed = be32_to_cpu(tep->cpu_speed);
	te.timestamp = be64_to_cpu(tep->timestamp);
	te.progusage1 = be64_to_cpu(tep->progusage1);
	te.progusage2 = be64_to_cpu(tep->progusage2);
	te.progusage3 = be64_to_cpu(tep->progusage3);
	te.tod_base = be64_to_cpu(tep->tod_base);
	te.mach_type = be16_to_cpu(tep->mach_type);
	te.res1 = be16_to_cpu(tep->res1);
	te.res2 = be32_to_cpu(tep->res2);

	color_fprintf(stdout, color, " [%#08zx] Trailer:%c%c%c%c%c"
		      " Cfvn:%d Csvn:%d Speed:%d TOD:%#llx\n",
		      offset, te.clock_base ? 'T' : ' ',
		      te.speed ? 'S' : ' ', te.mtda ? 'M' : ' ',
		      te.caca ? 'C' : ' ', te.lcda ? 'L' : ' ',
		      te.cfvn, te.csvn, te.cpu_speed, te.timestamp);
	color_fprintf(stdout, color, "\t\t1:%lx 2:%lx 3:%lx TOD-Base:%#llx"
		      " Type:%x\n\n",
		      te.progusage1, te.progusage2, te.progusage3,
		      te.tod_base, te.mach_type);
}

/* Return starting number of a counter set */
static int get_counterset_start(int setnr)
{
	switch (setnr) {
	case CPUMF_CTR_SET_BASIC:	/* Basic counter set */
		return 0;
	case CPUMF_CTR_SET_USER:	/* Problem state counter set */
		return 32;
	case CPUMF_CTR_SET_CRYPTO:	/* Crypto counter set */
		return 64;
	case CPUMF_CTR_SET_EXT:		/* Extended counter set */
		return 128;
	case CPUMF_CTR_SET_MT_DIAG:	/* Diagnostic counter set */
		return 448;
	default:
		return -1;
	}
}

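The PMU table lookup that follows keys on a global counter number: the set's starting number plus the counter's index inside the set. For example, counter 1 of the crypto-activity set (set 2) becomes 64 + 1 = 65, so the table entry whose event string is "event=0x41" supplies the name. A minimal sketch of that arithmetic, assuming nothing beyond the standard library:

#include <stdio.h>

/* Same mapping as get_counterset_start() above. */
static int counterset_start(int set)
{
	switch (set) {
	case 0: return 0;	/* basic          */
	case 1: return 32;	/* problem state  */
	case 2: return 64;	/* crypto         */
	case 3: return 128;	/* extended       */
	case 4: return 448;	/* MT diagnostic  */
	default: return -1;
	}
}

int main(void)
{
	int set = 2, nr = 1;
	int wanted = counterset_start(set) + nr;

	/* The PMU events table stores the key as "event=0x41" for this counter. */
	printf("set %d counter %d -> event=%#x\n", set, nr, wanted);
	return 0;
}
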
/* Scan the PMU table and extract the logical name of a counter from the
 * PMU events table. Input is the counter set and counter number within the
 * set. Construct the event number and use this as key. If they match return
 * the name of this counter.
 * If no match is found a NULL pointer is returned.
 */
static const char *get_counter_name(int set, int nr, struct pmu_events_map *map)
{
	int rc, event_nr, wanted = get_counterset_start(set) + nr;

	if (map) {
		struct pmu_event *evp = map->table;

		for (; evp->name || evp->event || evp->desc; ++evp) {
			if (evp->name == NULL || evp->event == NULL)
				continue;
			rc = sscanf(evp->event, "event=%x", &event_nr);
			if (rc == 1 && event_nr == wanted)
				return evp->name;
		}
	}
	return NULL;
}

static void s390_cpumcfdg_dump(struct perf_sample *sample)
{
	size_t i, len = sample->raw_size, offset = 0;
	unsigned char *buf = sample->raw_data;
	const char *color = PERF_COLOR_BLUE;
	struct cf_ctrset_entry *cep, ce;
	struct pmu_events_map *map;
	struct perf_pmu pmu;
	u64 *p;

	memset(&pmu, 0, sizeof(pmu));
	map = perf_pmu__find_map(&pmu);
	while (offset < len) {
		cep = (struct cf_ctrset_entry *)(buf + offset);

		ce.def = be16_to_cpu(cep->def);
		ce.set = be16_to_cpu(cep->set);
		ce.ctr = be16_to_cpu(cep->ctr);
		ce.res1 = be16_to_cpu(cep->res1);

		if (!ctrset_valid(&ce)) { /* Print trailer */
			s390_cpumcfdg_dumptrail(color, offset,
						(struct cf_trailer_entry *)cep);
			return;
		}

		color_fprintf(stdout, color, " [%#08zx] Counterset:%d"
			      " Counters:%d\n", offset, ce.set, ce.ctr);
		for (i = 0, p = (u64 *)(cep + 1); i < ce.ctr; ++i, ++p) {
			const char *ev_name = get_counter_name(ce.set, i, map);

			color_fprintf(stdout, color,
				      "\tCounter:%03d %s Value:%#018lx\n", i,
				      ev_name ?: "<unknown>", be64_to_cpu(*p));
		}
		offset += ctrset_size(&ce);
	}
}

/* S390 specific trace event function. Check for PERF_RECORD_SAMPLE events
 * and if the event was triggered by a counter set diagnostic event display
 * its raw data.
 * The function is only invoked when the dump flag -D is set.
 */
void perf_evlist__s390_sample_raw(struct perf_evlist *evlist, union perf_event *event,
				  struct perf_sample *sample)
{
	struct perf_evsel *ev_bc000;

	if (event->header.type != PERF_RECORD_SAMPLE)
		return;

	ev_bc000 = perf_evlist__event2evsel(evlist, event);
	if (ev_bc000 == NULL ||
	    ev_bc000->attr.config != PERF_EVENT_CPUM_CF_DIAG)
		return;

	/* Display raw data on screen */
	if (!s390_cpumcfdg_testctr(sample)) {
		pr_err("Invalid counter set data encountered\n");
		return;
	}
	s390_cpumcfdg_dump(sample);
}
18 tools/perf/util/sample-raw.c Normal file
@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 */

#include <string.h>
#include "evlist.h"
#include "env.h"
#include "sample-raw.h"

/*
 * Check the platform the perf data file was created on and perform platform
 * specific interpretation.
 */
void perf_evlist__init_trace_event_sample_raw(struct perf_evlist *evlist)
{
	const char *arch_pf = perf_env__arch(evlist->env);

	if (arch_pf && !strcmp("s390", arch_pf))
		evlist->trace_event_sample_raw = perf_evlist__s390_sample_raw;
}

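The hook is a single per-evlist function pointer, so wiring up another architecture would only mean adding a second strcmp branch and a handler with the same signature. A purely hypothetical, self-contained sketch of that shape with reduced stand-in types (none of the "other" names below exist in perf):

#include <stdio.h>
#include <string.h>

struct sample { int dummy; };
struct evlist {					/* reduced stand-in */
	const char *arch;			/* what perf_env__arch() reports */
	void (*trace_event_sample_raw)(struct evlist *, struct sample *);
};

static void s390_sample_raw(struct evlist *e, struct sample *s)  { (void)e; (void)s; printf("s390 dump\n"); }
static void other_sample_raw(struct evlist *e, struct sample *s) { (void)e; (void)s; printf("other dump\n"); }

/* Same shape as perf_evlist__init_trace_event_sample_raw(): pick the handler
 * from the architecture recorded in the perf.data header.  The second branch
 * is made up, only to show how another arch would slot in. */
static void init_sample_raw(struct evlist *e)
{
	if (e->arch && !strcmp("s390", e->arch))
		e->trace_event_sample_raw = s390_sample_raw;
	else if (e->arch && !strcmp("x86", e->arch))	/* hypothetical */
		e->trace_event_sample_raw = other_sample_raw;
}

int main(void)
{
	struct evlist e = { .arch = "s390" };
	struct sample s = { 0 };

	init_sample_raw(&e);
	if (e.trace_event_sample_raw)
		e.trace_event_sample_raw(&e, &s);	/* prints "s390 dump" */
	return 0;
}
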
14 tools/perf/util/sample-raw.h Normal file
@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SAMPLE_RAW_H
#define __SAMPLE_RAW_H 1

struct perf_evlist;
union perf_event;
struct perf_sample;

void perf_evlist__s390_sample_raw(struct perf_evlist *evlist,
				  union perf_event *event,
				  struct perf_sample *sample);

void perf_evlist__init_trace_event_sample_raw(struct perf_evlist *evlist);
#endif /* __SAMPLE_RAW_H */
@ -23,6 +23,7 @@
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "arch/common.h"

@ -147,6 +148,8 @@ struct perf_session *perf_session__new(struct perf_data *data,
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}

		perf_evlist__init_trace_event_sample_raw(session->evlist);
	}
} else {
	session->machines.host.env = &perf_env;
@ -376,6 +379,10 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
		tool->itrace_start = perf_event__process_itrace_start;
	if (tool->context_switch == NULL)
		tool->context_switch = perf_event__process_switch;
	if (tool->ksymbol == NULL)
		tool->ksymbol = perf_event__process_ksymbol;
	if (tool->bpf_event == NULL)
		tool->bpf_event = perf_event__process_bpf_event;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
@ -1065,6 +1072,8 @@ static void dump_event(struct perf_evlist *evlist, union perf_event *event,
	       file_offset, event->header.size, event->header.type);

	trace_event(event);
	if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
		evlist->trace_event_sample_raw(evlist, event, sample);

	if (sample)
		perf_evlist__print_tstamp(evlist, event, sample);
@ -1305,6 +1314,10 @@ static int machines__deliver_event(struct machines *machines,
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		return tool->context_switch(tool, event, sample, machine);
	case PERF_RECORD_KSYMBOL:
		return tool->ksymbol(tool, event, sample, machine);
	case PERF_RECORD_BPF_EVENT:
		return tool->bpf_event(tool, event, sample, machine);
	default:
		++evlist->stats.nr_unknown_events;
		return -1;
@ -1820,38 +1833,35 @@ fetch_mmaped_event(struct perf_session *session,
#define NUM_MMAPS 128
#endif

static int __perf_session__process_events(struct perf_session *session,
					   u64 data_offset, u64 data_size,
					   u64 file_size)
struct reader {
	int fd;
	u64 data_size;
	u64 data_offset;
};

static int
reader__process_events(struct reader *rd, struct perf_session *session,
			struct ui_progress *prog)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	int fd = perf_data__fd(session->data);
	u64 data_size = rd->data_size;
	u64 head, page_offset, file_offset, file_pos, size;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	int err = 0, mmap_prot, mmap_flags, map_idx = 0;
	size_t mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	struct ui_progress prog;
	s64 skip;

	perf_tool__fill_defaults(tool);

	page_offset = page_size * (data_offset / page_size);
	page_offset = page_size * (rd->data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;
	head = rd->data_offset - page_offset;

	if (data_size == 0)
		goto out;
	ui_progress__init_size(prog, data_size, "Processing events...");

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	ui_progress__init_size(&prog, file_size, "Processing events...");
	data_size += rd->data_offset;

	mmap_size = MMAP_SIZE;
	if (mmap_size > file_size) {
		mmap_size = file_size;
	if (mmap_size > data_size) {
		mmap_size = data_size;
		session->one_mmap = true;
	}

@ -1865,12 +1875,12 @@ static int __perf_session__process_events(struct perf_session *session,
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
		goto out;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
@ -1902,7 +1912,7 @@ more:
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
		goto out;
	}

	if (skip)
@ -1911,15 +1921,40 @@ more:
	head += size;
	file_pos += size;

	ui_progress__update(&prog, size);
	ui_progress__update(prog, size);

	if (session_done())
		goto out;

	if (file_pos < file_size)
	if (file_pos < data_size)
		goto more;

out:
	return err;
}

static int __perf_session__process_events(struct perf_session *session)
{
	struct reader rd = {
		.fd = perf_data__fd(session->data),
		.data_size = session->header.data_size,
		.data_offset = session->header.data_offset,
	};
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct ui_progress prog;
	int err;

	perf_tool__fill_defaults(tool);

	if (rd.data_size == 0)
		return -1;

	ui_progress__init_size(&prog, rd.data_size, "Processing events...");

	err = reader__process_events(&rd, session, &prog);
	if (err)
		goto out_err;
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
@ -1944,20 +1979,13 @@ out_err:

int perf_session__process_events(struct perf_session *session)
{
	u64 size = perf_data__size(session->data);
	int err;

	if (perf_session__register_idle_thread(session) < 0)
		return -ENOMEM;

	if (!perf_data__is_pipe(session->data))
		err = __perf_session__process_events(session,
						     session->header.data_offset,
						     session->header.data_size, size);
	else
		err = __perf_session__process_pipe_events(session);
	if (perf_data__is_pipe(session->data))
		return __perf_session__process_pipe_events(session);

	return err;
	return __perf_session__process_events(session);
}

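The refactor above is the groundwork mentioned in the merge description: the mmap-and-iterate loop now runs against a small reader object (fd, data_offset, data_size) instead of loose arguments, so several readers could later walk disjoint slices of the data area in parallel. A purely hypothetical, self-contained sketch of that direction, with a made-up helper and no claim about how perf will actually split the work:

#include <stdio.h>
#include <stdint.h>

struct reader {				/* mirrors the object introduced above */
	int fd;
	uint64_t data_size;
	uint64_t data_offset;
};

/* Hypothetical only: carve one data area into nr slices, last one takes the
 * remainder.  Ordering events across slices would still need the session's
 * ordered_events machinery. */
static void readers__setup(struct reader *rds, int nr, int fd,
			   uint64_t data_offset, uint64_t data_size)
{
	uint64_t slice = data_size / nr;

	for (int i = 0; i < nr; i++) {
		rds[i].fd = fd;
		rds[i].data_offset = data_offset + (uint64_t)i * slice;
		rds[i].data_size = (i == nr - 1) ? data_size - (uint64_t)i * slice
						 : slice;
	}
}

int main(void)
{
	struct reader rds[4];

	readers__setup(rds, 4, 3, 4096, 1 << 20);
	for (int i = 0; i < 4; i++)
		printf("reader %d: offset %llu size %llu\n", i,
		       (unsigned long long)rds[i].data_offset,
		       (unsigned long long)rds[i].data_size);
	return 0;
}
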
bool perf_session__has_traces(struct perf_session *session, const char *msg)
@ -17,6 +17,8 @@ if cc == "clang":
            vars[var] = sub("-mcet", "", vars[var])
        if not clang_has_option("-fcf-protection"):
            vars[var] = sub("-fcf-protection", "", vars[var])
        if not clang_has_option("-fstack-clash-protection"):
            vars[var] = sub("-fstack-clash-protection", "", vars[var])

from distutils.core import setup, Extension
@ -53,7 +53,10 @@ struct perf_tool {
			itrace_start,
			context_switch,
			throttle,
			unthrottle;
			unthrottle,
			ksymbol,
			bpf_event;

	event_attr_op attr;
	event_attr_op event_update;
	event_op2 tracing_data;
@ -6,7 +6,6 @@
#include <sys/mman.h>
#include <zlib.h>
#include <linux/compiler.h>
#include <unistd.h>

#include "util/compress.h"
#include "util/util.h"