forked from Minki/linux
88a71086c4
On ppc64le, trying to build bpf seltests throws the below warning: In file included from runqslower.bpf.c:5: ./runqslower.h:7:8: error: redefinition of 'event' struct event { ^ /home/naveen/linux/tools/testing/selftests/bpf/tools/build/runqslower/vmlinux.h:156602:8: note: previous definition is here struct event { ^ This happens since 'struct event' is defined in drivers/net/ethernet/alteon/acenic.h . Rename the one in runqslower to a more appropriate 'runq_event' to avoid the naming conflict. Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> Acked-by: Daniel Borkmann <daniel@iogearbox.net> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au> Link: https://lore.kernel.org/r/c13cb3767d26257ca4387b8296b632b433a58db6.1641468127.git.naveen.n.rao@linux.vnet.ibm.com
108 lines
2.3 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
// Copyright (c) 2019 Facebook
|
|
#include "vmlinux.h"
|
|
#include <bpf/bpf_helpers.h>
|
|
#include "runqslower.h"
|
|
|
|
#define TASK_RUNNING 0
|
|
#define BPF_F_CURRENT_CPU 0xffffffffULL
|
|
|
|
/* Minimum run-queue latency (us) required to emit an event; 0 = report all.
 * NOTE(review): const volatile globals are presumably set from userspace via
 * the skeleton before load — confirm against the loader. */
const volatile __u64 min_us = 0;
/* If non-zero, only this PID is traced; 0 = trace every task. */
const volatile pid_t targ_pid = 0;
|
|
|
|
/* Per-task storage holding the enqueue (wakeup) timestamp in nanoseconds,
 * written by trace_enqueue() and consumed in handle__sched_switch(). */
struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, u64);
} start SEC(".maps");
|
|
|
|
/* Perf event array used to stream runq_event records to userspace
 * via bpf_perf_event_output(). */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(u32));
} events SEC(".maps");
|
|
|
|
/* record enqueue timestamp */
|
|
__always_inline
|
|
static int trace_enqueue(struct task_struct *t)
|
|
{
|
|
u32 pid = t->pid;
|
|
u64 *ptr;
|
|
|
|
if (!pid || (targ_pid && targ_pid != pid))
|
|
return 0;
|
|
|
|
ptr = bpf_task_storage_get(&start, t, 0,
|
|
BPF_LOCAL_STORAGE_GET_F_CREATE);
|
|
if (!ptr)
|
|
return 0;
|
|
|
|
*ptr = bpf_ktime_get_ns();
|
|
return 0;
|
|
}
|
|
|
|
/* A sleeping task was woken and became runnable: record its enqueue time. */
SEC("tp_btf/sched_wakeup")
int handle__sched_wakeup(u64 *ctx)
{
	/* TP_PROTO(struct task_struct *p) */
	struct task_struct *task = (struct task_struct *)ctx[0];

	return trace_enqueue(task);
}
|
|
|
|
/* A newly created task became runnable for the first time: record its
 * enqueue time just like a regular wakeup. */
SEC("tp_btf/sched_wakeup_new")
int handle__sched_wakeup_new(u64 *ctx)
{
	/* TP_PROTO(struct task_struct *p) */
	struct task_struct *task = (struct task_struct *)ctx[0];

	return trace_enqueue(task);
}
|
|
|
|
/*
 * Context switch: @prev is scheduled out, @next is scheduled in.
 *
 * If @prev is still TASK_RUNNING it was preempted (involuntary switch), so
 * treat that as a fresh enqueue. For @next, compute how long it sat on the
 * run queue since trace_enqueue() stamped it, and emit a runq_event to
 * userspace when the latency exceeds min_us.
 *
 * Fix: removed the unused local 'long state;' — it was declared but never
 * referenced (prev->__state is read directly).
 */
SEC("tp_btf/sched_switch")
int handle__sched_switch(u64 *ctx)
{
	/* TP_PROTO(bool preempt, struct task_struct *prev,
	 * struct task_struct *next)
	 */
	struct task_struct *prev = (struct task_struct *)ctx[1];
	struct task_struct *next = (struct task_struct *)ctx[2];
	struct runq_event event = {};
	u64 *tsp, delta_us;
	u32 pid;

	/* ivcsw: treat like an enqueue event and store timestamp */
	if (prev->__state == TASK_RUNNING)
		trace_enqueue(prev);

	pid = next->pid;

	/* For pid mismatch, save a bpf_task_storage_get */
	if (!pid || (targ_pid && targ_pid != pid))
		return 0;

	/* fetch timestamp and calculate delta */
	tsp = bpf_task_storage_get(&start, next, 0, 0);
	if (!tsp)
		return 0; /* missed enqueue */

	delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;
	if (min_us && delta_us <= min_us)
		return 0;

	event.pid = pid;
	event.delta_us = delta_us;
	bpf_get_current_comm(&event.task, sizeof(event.task));

	/* output */
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
			      &event, sizeof(event));

	bpf_task_storage_delete(&start, next);
	return 0;
}
|
|
|
|
char LICENSE[] SEC("license") = "GPL";
|