Merge branch 'introduce BPF_F_PRESERVE_ELEMS'

Song Liu says:

====================
This set introduces BPF_F_PRESERVE_ELEMS to the perf event array for better
sharing of perf events. By default, a perf event array removes the perf event
when the map fd used to add the event is closed. With BPF_F_PRESERVE_ELEMS
set, however, the perf event stays in the array until it is explicitly removed
or the map itself is freed (a usage sketch follows the commit details below).
---
Changes v3 => v4:
1. Clean up in selftest. (Alexei)

Changes v2 => v3:
1. Move perf_event_fd_array_map_free() to avoid unnecessary forward
   declaration. (Daniel)

Changes v1 => v2:
1. Rename the flag as BPF_F_PRESERVE_ELEMS. (Alexei, Daniel)
2. Simplify the code and selftest. (Daniel, Alexei)
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Merged by: Alexei Starovoitov, 2020-09-30 23:18:12 -07:00
commit 6208689fb3
5 changed files, 127 additions and 2 deletions
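For context, and not part of the patch itself: a minimal sketch of how user
space might request the new flag when creating a perf event array. It uses the
raw bpf(2) syscall so it does not depend on a particular libbpf version; the
helper name is ours, and error handling is elided.

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef BPF_F_PRESERVE_ELEMS
#define BPF_F_PRESERVE_ELEMS (1U << 11) /* value introduced by this patch */
#endif

/* Create a BPF_MAP_TYPE_PERF_EVENT_ARRAY whose elements survive the map fd
 * being closed. Returns a map fd, or -1 with errno set (EINVAL on kernels
 * without this patch, where the flag is outside ARRAY_CREATE_FLAG_MASK).
 */
static int create_preserving_pe_array(unsigned int max_entries)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_type = BPF_MAP_TYPE_PERF_EVENT_ARRAY;
        attr.key_size = sizeof(int);
        attr.value_size = sizeof(int);
        attr.max_entries = max_entries;
        attr.map_flags = BPF_F_PRESERVE_ELEMS;

        return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}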

include/uapi/linux/bpf.h

@@ -414,6 +414,9 @@ enum {
 /* Enable memory-mapping BPF map */
        BPF_F_MMAPABLE = (1U << 10),
 
+/* Share perf_event among processes */
+       BPF_F_PRESERVE_ELEMS = (1U << 11),
+
 };
 
 /* Flags for BPF_PROG_QUERY. */

kernel/bpf/arraymap.c

@@ -15,7 +15,8 @@
 #include "map_in_map.h"
 
 #define ARRAY_CREATE_FLAG_MASK \
-       (BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK)
+       (BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
+        BPF_F_PRESERVE_ELEMS)
 
 static void bpf_array_free_percpu(struct bpf_array *array)
 {
@@ -64,6 +65,10 @@ int array_map_alloc_check(union bpf_attr *attr)
            attr->map_flags & BPF_F_MMAPABLE)
                return -EINVAL;
 
+       if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
+           attr->map_flags & BPF_F_PRESERVE_ELEMS)
+               return -EINVAL;
+
        if (attr->value_size > KMALLOC_MAX_SIZE)
                /* if value_size is bigger, the user space won't be able to
                 * access the elements.
@@ -1134,6 +1139,9 @@ static void perf_event_fd_array_release(struct bpf_map *map,
        struct bpf_event_entry *ee;
        int i;
 
+       if (map->map_flags & BPF_F_PRESERVE_ELEMS)
+               return;
+
        rcu_read_lock();
        for (i = 0; i < array->map.max_entries; i++) {
                ee = READ_ONCE(array->ptrs[i]);
@@ -1143,12 +1151,19 @@ static void perf_event_fd_array_release(struct bpf_map *map,
        rcu_read_unlock();
 }
 
+static void perf_event_fd_array_map_free(struct bpf_map *map)
+{
+       if (map->map_flags & BPF_F_PRESERVE_ELEMS)
+               bpf_fd_array_map_clear(map);
+       fd_array_map_free(map);
+}
+
 static int perf_event_array_map_btf_id;
 const struct bpf_map_ops perf_event_array_map_ops = {
        .map_meta_equal = bpf_map_meta_equal,
        .map_alloc_check = fd_array_map_alloc_check,
        .map_alloc = array_map_alloc,
-       .map_free = fd_array_map_free,
+       .map_free = perf_event_fd_array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
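Taken together, the early return in perf_event_fd_array_release() and the new
map_free callback change the element lifecycle. A sketch of the observable
effect from user space, assuming the hypothetical create_preserving_pe_array()
helper above, a pe_fd obtained from perf_event_open(), and a loaded BPF
program that keeps the map alive:

#include <bpf/bpf.h>    /* libbpf wrapper for the map update syscall */
#include <unistd.h>

static void drop_fds_keep_event(int map_fd, int pe_fd)
{
        int key = 0;

        bpf_map_update_elem(map_fd, &key, &pe_fd, BPF_ANY);
        /* the array now holds its own reference to the perf event */
        close(pe_fd);
        /* with BPF_F_PRESERVE_ELEMS, perf_event_fd_array_release() returns
         * early, so slot 0 survives this close; without the flag the slot
         * is cleared and bpf_perf_event_read_value() later sees -ENOENT
         */
        close(map_fd);
        /* the preserved element is finally dropped by
         * perf_event_fd_array_map_free() once the last map reference goes
         */
}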

tools/include/uapi/linux/bpf.h

@@ -414,6 +414,9 @@ enum {
 /* Enable memory-mapping BPF map */
        BPF_F_MMAPABLE = (1U << 10),
 
+/* Share perf_event among processes */
+       BPF_F_PRESERVE_ELEMS = (1U << 11),
+
 };
 
 /* Flags for BPF_PROG_QUERY. */

tools/testing/selftests/bpf/prog_tests/pe_preserve_elems.c

@@ -0,0 +1,66 @@ (new file)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
#include <linux/bpf.h>
#include "test_pe_preserve_elems.skel.h"

static int duration;

static void test_one_map(struct bpf_map *map, struct bpf_program *prog,
                         bool has_share_pe)
{
        int err, key = 0, pfd = -1, mfd = bpf_map__fd(map);
        DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts);
        struct perf_event_attr attr = {
                .size = sizeof(struct perf_event_attr),
                .type = PERF_TYPE_SOFTWARE,
                .config = PERF_COUNT_SW_CPU_CLOCK,
        };

        pfd = syscall(__NR_perf_event_open, &attr, 0 /* pid */,
                      -1 /* any cpu */, -1 /* group id */, 0 /* flags */);
        if (CHECK(pfd < 0, "perf_event_open", "failed\n"))
                return;

        err = bpf_map_update_elem(mfd, &key, &pfd, BPF_ANY);
        close(pfd);
        if (CHECK(err < 0, "bpf_map_update_elem", "failed\n"))
                return;

        err = bpf_prog_test_run_opts(bpf_program__fd(prog), &opts);
        if (CHECK(err < 0, "bpf_prog_test_run_opts", "failed\n"))
                return;
        if (CHECK(opts.retval != 0, "bpf_perf_event_read_value",
                  "failed with %d\n", opts.retval))
                return;

        /* closing mfd, prog still holds a reference on map */
        close(mfd);

        err = bpf_prog_test_run_opts(bpf_program__fd(prog), &opts);
        if (CHECK(err < 0, "bpf_prog_test_run_opts", "failed\n"))
                return;

        if (has_share_pe) {
                CHECK(opts.retval != 0, "bpf_perf_event_read_value",
                      "failed with %d\n", opts.retval);
        } else {
                CHECK(opts.retval != -ENOENT, "bpf_perf_event_read_value",
                      "should have failed with %d, but got %d\n", -ENOENT,
                      opts.retval);
        }
}

void test_pe_preserve_elems(void)
{
        struct test_pe_preserve_elems *skel;

        skel = test_pe_preserve_elems__open_and_load();
        if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
                return;

        test_one_map(skel->maps.array_1, skel->progs.read_array_1, false);
        test_one_map(skel->maps.array_2, skel->progs.read_array_2, true);

        test_pe_preserve_elems__destroy(skel);
}
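Assuming the standard BPF selftest harness, the new test should be runnable
from tools/testing/selftests/bpf with: ./test_progs -t pe_preserve_elems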

tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c

@@ -0,0 +1,38 @@ (new file)
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct {
        __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
        __uint(max_entries, 1);
        __uint(key_size, sizeof(int));
        __uint(value_size, sizeof(int));
} array_1 SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
        __uint(max_entries, 1);
        __uint(key_size, sizeof(int));
        __uint(value_size, sizeof(int));
        __uint(map_flags, BPF_F_PRESERVE_ELEMS);
} array_2 SEC(".maps");

SEC("raw_tp/sched_switch")
int BPF_PROG(read_array_1)
{
        struct bpf_perf_event_value val;

        return bpf_perf_event_read_value(&array_1, 0, &val, sizeof(val));
}

SEC("raw_tp/task_rename")
int BPF_PROG(read_array_2)
{
        struct bpf_perf_event_value val;

        return bpf_perf_event_read_value(&array_2, 0, &val, sizeof(val));
}

char LICENSE[] SEC("license") = "GPL";
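Worth noting: the two raw_tp programs above are never attached to their
tracepoints in this test. The selftest drives them through
bpf_prog_test_run_opts(), which was enabled for raw tracepoints in the same
development cycle, so the return code of bpf_perf_event_read_value() surfaces
directly as opts.retval.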