perf, bpf: Add BPF support to all perf_event types
Allow BPF_PROG_TYPE_PERF_EVENT programs to attach to all perf_event types, including HW_CACHE, RAW, and dynamic pmu events. Only tracepoint/kprobe events are treated differently; they require BPF_PROG_TYPE_TRACEPOINT/BPF_PROG_TYPE_KPROBE program types, respectively.

Also add support for reading all event counters using the bpf_perf_event_read() helper.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5071034e4a
commit f91840a32d
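For illustration, a minimal user-space sketch (not part of the patch) of what the relaxed attach check enables: hooking an already-loaded BPF_PROG_TYPE_PERF_EVENT program (prog_fd, assumed to come from bpf(BPF_PROG_LOAD, ...)) onto a HW_CACHE sampling event via PERF_EVENT_IOC_SET_BPF. Before this change, the attach path only accepted TRACEPOINT, HARDWARE, and SOFTWARE event types and rejected HW_CACHE/RAW/dynamic-pmu events with -EINVAL.

/*
 * Sketch only: attach a loaded BPF_PROG_TYPE_PERF_EVENT program to an
 * L1D read-miss counter sampled on one CPU. prog_fd and cpu are assumed
 * inputs; error handling is minimal.
 */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

static int attach_prog_to_cache_event(int prog_fd, int cpu)
{
        struct perf_event_attr attr;
        int event_fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HW_CACHE;          /* newly allowed for BPF attach */
        attr.config = PERF_COUNT_HW_CACHE_L1D |
                      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
                      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
        attr.sample_period = 100000;             /* program runs on each sample */

        /* measure all tasks on this CPU */
        event_fd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
        if (event_fd < 0)
                return -1;

        /* route samples of this event through the BPF program */
        if (ioctl(event_fd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
                close(event_fd);
                return -1;
        }
        return event_fd;
}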
@@ -896,7 +896,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
 				void *context);
 extern void perf_pmu_migrate_context(struct pmu *pmu,
 				int src_cpu, int dst_cpu);
-extern u64 perf_event_read_local(struct perf_event *event);
+int perf_event_read_local(struct perf_event *event, u64 *value);
 extern u64 perf_event_read_value(struct perf_event *event,
 				 u64 *enabled, u64 *running);
@@ -1301,7 +1301,10 @@ static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *
 {
 	return ERR_PTR(-EINVAL);
 }
-static inline u64 perf_event_read_local(struct perf_event *event) { return -EINVAL; }
+static inline int perf_event_read_local(struct perf_event *event, u64 *value)
+{
+	return -EINVAL;
+}
 static inline void perf_event_print_debug(void) { }
 static inline int perf_event_task_disable(void) { return -EINVAL; }
 static inline int perf_event_task_enable(void) { return -EINVAL; }
@@ -452,38 +452,24 @@ static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
 static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
 					 struct file *map_file, int fd)
 {
-	const struct perf_event_attr *attr;
 	struct bpf_event_entry *ee;
 	struct perf_event *event;
 	struct file *perf_file;
+	u64 value;
 
 	perf_file = perf_event_get(fd);
 	if (IS_ERR(perf_file))
 		return perf_file;
 
+	ee = ERR_PTR(-EOPNOTSUPP);
 	event = perf_file->private_data;
-	ee = ERR_PTR(-EINVAL);
-
-	attr = perf_event_attrs(event);
-	if (IS_ERR(attr) || attr->inherit)
+	if (perf_event_read_local(event, &value) == -EOPNOTSUPP)
 		goto err_out;
 
-	switch (attr->type) {
-	case PERF_TYPE_SOFTWARE:
-		if (attr->config != PERF_COUNT_SW_BPF_OUTPUT)
-			goto err_out;
-		/* fall-through */
-	case PERF_TYPE_RAW:
-	case PERF_TYPE_HARDWARE:
-		ee = bpf_event_entry_gen(perf_file, map_file);
-		if (ee)
-			return ee;
-		ee = ERR_PTR(-ENOMEM);
-		/* fall-through */
-	default:
-		break;
-	}
-
+	ee = bpf_event_entry_gen(perf_file, map_file);
+	if (ee)
+		return ee;
+	ee = ERR_PTR(-ENOMEM);
 err_out:
 	fput(perf_file);
 	return ee;
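Storing a counter fd in a BPF_MAP_TYPE_PERF_EVENT_ARRAY goes through perf_event_fd_array_get_ptr() above, so after this change any event that perf_event_read_local() does not reject with -EOPNOTSUPP (for example a HW_CACHE or dynamic-pmu counter) can be placed in the map, not only PERF_TYPE_HARDWARE/RAW/SW_BPF_OUTPUT events. A user-space sketch (not part of the patch; libbpf-style helper and header path assumed):

/*
 * Sketch: put a perf event fd into slot `cpu` of a BPF_MAP_TYPE_PERF_EVENT_ARRAY.
 * map_fd and event_fd are assumed to exist already.
 */
#include <bpf/bpf.h>            /* libbpf-style bpf_map_update_elem(), assumed */

static int add_counter_to_map(int map_fd, int cpu, int event_fd)
{
        __u32 key = cpu;        /* one slot per CPU is the usual convention */
        __u32 value = event_fd; /* the map takes the perf event fd as value */

        return bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);
}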
@@ -3636,10 +3636,10 @@ static inline u64 perf_event_count(struct perf_event *event)
 *     will not be local and we cannot read them atomically
 *   - must not have a pmu::count method
 */
-u64 perf_event_read_local(struct perf_event *event)
+int perf_event_read_local(struct perf_event *event, u64 *value)
 {
 	unsigned long flags;
-	u64 val;
+	int ret = 0;
 
 	/*
 	 * Disabling interrupts avoids all counter scheduling (context
@@ -3647,25 +3647,37 @@ u64 perf_event_read_local(struct perf_event *event)
 	 */
 	local_irq_save(flags);
 
-	/* If this is a per-task event, it must be for current */
-	WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) &&
-		     event->hw.target != current);
-
-	/* If this is a per-CPU event, it must be for this CPU */
-	WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) &&
-		     event->cpu != smp_processor_id());
-
 	/*
 	 * It must not be an event with inherit set, we cannot read
 	 * all child counters from atomic context.
 	 */
-	WARN_ON_ONCE(event->attr.inherit);
+	if (event->attr.inherit) {
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
 
 	/*
 	 * It must not have a pmu::count method, those are not
 	 * NMI safe.
 	 */
-	WARN_ON_ONCE(event->pmu->count);
+	if (event->pmu->count) {
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
+
+	/* If this is a per-task event, it must be for current */
+	if ((event->attach_state & PERF_ATTACH_TASK) &&
+	    event->hw.target != current) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* If this is a per-CPU event, it must be for this CPU */
+	if (!(event->attach_state & PERF_ATTACH_TASK) &&
+	    event->cpu != smp_processor_id()) {
+		ret = -EINVAL;
+		goto out;
+	}
 
 	/*
 	 * If the event is currently on this CPU, its either a per-task event,
@@ -3675,10 +3687,11 @@ u64 perf_event_read_local(struct perf_event *event)
 	if (event->oncpu == smp_processor_id())
 		event->pmu->read(event);
 
-	val = local64_read(&event->count);
+	*value = local64_read(&event->count);
+out:
 	local_irq_restore(flags);
 
-	return val;
+	return ret;
 }
 
 static int perf_event_read(struct perf_event *event, bool group)
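The new signature separates the error path from the counter value: unsupported events (inherit set, pmu::count) report -EOPNOTSUPP and misuse from the wrong CPU or task reports -EINVAL, while the count itself comes back through the output pointer. A hypothetical in-kernel caller (not in this patch) would use it like this:

/* Hypothetical helper illustrating the new calling convention. */
static int read_counter_now(struct perf_event *event, u64 *out)
{
	u64 value;
	int err;

	/* errors (-EOPNOTSUPP, -EINVAL) now come back as a return code ... */
	err = perf_event_read_local(event, &value);
	if (err)
		return err;

	/* ... and the up-to-date counter value through the output parameter */
	*out = value;
	return 0;
}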
@@ -8037,12 +8050,8 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
 	bool is_kprobe, is_tracepoint;
 	struct bpf_prog *prog;
 
-	if (event->attr.type == PERF_TYPE_HARDWARE ||
-	    event->attr.type == PERF_TYPE_SOFTWARE)
-		return perf_event_set_bpf_handler(event, prog_fd);
-
 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
-		return -EINVAL;
+		return perf_event_set_bpf_handler(event, prog_fd);
 
 	if (event->tp_event->prog)
 		return -EEXIST;
@@ -234,7 +234,8 @@ BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
 	unsigned int cpu = smp_processor_id();
 	u64 index = flags & BPF_F_INDEX_MASK;
 	struct bpf_event_entry *ee;
-	struct perf_event *event;
+	u64 value = 0;
+	int err;
 
 	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
 		return -EINVAL;
@@ -247,21 +248,14 @@ BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
 	if (!ee)
 		return -ENOENT;
 
-	event = ee->event;
-	if (unlikely(event->attr.type != PERF_TYPE_HARDWARE &&
-		     event->attr.type != PERF_TYPE_RAW))
-		return -EINVAL;
-
-	/* make sure event is local and doesn't have pmu::count */
-	if (unlikely(event->oncpu != cpu || event->pmu->count))
-		return -EINVAL;
-
+	err = perf_event_read_local(ee->event, &value);
 	/*
-	 * we don't know if the function is run successfully by the
-	 * return value. It can be judged in other places, such as
-	 * eBPF programs.
+	 * this api is ugly since we miss [-22..-2] range of valid
+	 * counter values, but that's uapi
 	 */
-	return perf_event_read_local(event);
+	if (err)
+		return err;
+	return value;
 }
 
 static const struct bpf_func_proto bpf_perf_event_read_proto = {
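On the BPF side the helper is called as before; with this patch the counter in the array may be any event type that perf_event_read_local() accepts. A sketch modeled on the samples/bpf tracex6 style (map name, section name, and max_entries are assumptions, not from the patch) shows a kprobe program reading the per-CPU counter and coping with the [-22..-2] ambiguity mentioned in the comment above:

/*
 * Sketch of an eBPF program reading a counter stored in a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY slot for the current CPU.
 */
#include <linux/ptrace.h>
#include <linux/version.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"        /* samples/bpf helper header, assumed */

struct bpf_map_def SEC("maps") counters = {
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(u32),
	.max_entries = 64,      /* assumed: one slot per possible CPU */
};

SEC("kprobe/sys_write")
int count_on_write(struct pt_regs *ctx)
{
	/* read the counter slot for the CPU this program runs on */
	u64 v = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);

	/*
	 * As the comment in bpf_perf_event_read() notes, values in
	 * [-22..-2] are ambiguous: they can be an errno or a huge
	 * counter value. Treat them as errors here.
	 */
	if ((s64)v < 0 && (s64)v >= -22)
		return 0;

	/* use v, e.g. emit it via bpf_trace_printk() */
	return 0;
}

char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;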