#include "vmlinux.h"
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
/* Perf event array: userspace installs one opened perf event fd per slot
 * (indexed by CPU id in the programs below — presumably one event per CPU;
 * confirm against the userspace loader). Read via bpf_perf_event_read*().
 */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(u32));
	__uint(max_entries, 64);
} counters SEC(".maps");
/* Raw u64 counter readings keyed by CPU id; written by bpf_prog1() and
 * read back by userspace.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, int);
	__type(value, u64);
	__uint(max_entries, 64);
} values SEC(".maps");
/* Full struct bpf_perf_event_value readings (counter plus enabled/running
 * times) keyed by CPU id; written by bpf_prog2().
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, int);
	__type(value, struct bpf_perf_event_value);
	__uint(max_entries, 64);
} values2 SEC(".maps");
/*
 * Fires on htab_map_get_next_key(): sample the perf counter for the
 * current CPU and stash the raw reading in the "values" hash map.
 */
SEC("kprobe/htab_map_get_next_key")
int bpf_prog1(struct pt_regs *ctx)
{
	u32 cpu = bpf_get_smp_processor_id();
	u64 reading, *slot;
	s64 err;

	reading = bpf_perf_event_read(&counters, cpu);

	/* bpf_perf_event_read() folds errors into the returned u64;
	 * values in [-22, -2] (-EINVAL .. -ENOENT range) signal failure.
	 */
	err = (s64)reading;
	if (err <= -2 && err >= -22)
		return 0;

	slot = bpf_map_lookup_elem(&values, &cpu);
	if (slot)
		*slot = reading;
	else
		bpf_map_update_elem(&values, &cpu, &reading, BPF_NOEXIST);

	return 0;
}
/*
|
|
|
|
* Since *_map_lookup_elem can't be expected to trigger bpf programs
|
|
|
|
* due to potential deadlocks (bpf_disable_instrumentation), this bpf
|
|
|
|
* program will be attached to bpf_map_copy_value (which is called
|
|
|
|
* from map_lookup_elem) and will only filter the hashtable type.
|
|
|
|
*/
|
|
|
|
SEC("kprobe/bpf_map_copy_value")
|
|
|
|
int BPF_KPROBE(bpf_prog2, struct bpf_map *map)
|
2017-10-05 16:19:21 +00:00
|
|
|
{
|
|
|
|
u32 key = bpf_get_smp_processor_id();
|
|
|
|
struct bpf_perf_event_value *val, buf;
|
2023-08-18 09:01:17 +00:00
|
|
|
enum bpf_map_type type;
|
2017-10-05 16:19:21 +00:00
|
|
|
int error;
|
|
|
|
|
2023-08-18 09:01:17 +00:00
|
|
|
type = BPF_CORE_READ(map, map_type);
|
|
|
|
if (type != BPF_MAP_TYPE_HASH)
|
|
|
|
return 0;
|
|
|
|
|
2017-10-05 16:19:21 +00:00
|
|
|
error = bpf_perf_event_read_value(&counters, key, &buf, sizeof(buf));
|
|
|
|
if (error)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
val = bpf_map_lookup_elem(&values2, &key);
|
|
|
|
if (val)
|
|
|
|
*val = buf;
|
|
|
|
else
|
|
|
|
bpf_map_update_elem(&values2, &key, &buf, BPF_NOEXIST);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
/* GPL license is required for the kprobe/BPF helpers used above. */
char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;