bpf, libbpf: simplify and cleanup perf ring buffer walk
Simplify bpf_perf_event_read_simple() a bit and fix up some minor things along the way: the return code in the header is not of type int but enum bpf_perf_event_ret. Once the callback has indicated that the walk over the event data should stop, the record still needs to be consumed in data_tail, since it has already been processed.

Moreover, the bpf_perf_event_print_t callback should avoid void *: we actually get a pointer to struct perf_event_header, so applications can use container_of() and get type checks. The walk also does not need a modulo op, since the ring size is required to be a power of two.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 84430d4232
commit 3dca21156b
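For orientation before the hunks below, this is roughly what a consumer looks like against the reworked interface: the callback now receives a typed struct perf_event_header pointer and the reader takes explicit mmap and copy-buffer arguments. This is only a sketch under assumptions that are not part of the commit: my_sample, handle_event() and drain_ring() are made-up names, libbpf's header is assumed to be reachable as <bpf/libbpf.h>, and the perf_event_open()/mmap() setup (one metadata page followed by a power-of-two number of data pages) is assumed to happen elsewhere in the tool.

#include <stddef.h>
#include <stdio.h>
#include <linux/types.h>
#include <linux/perf_event.h>
#include <bpf/libbpf.h>

#ifndef container_of
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#endif

/* Hypothetical record layout filled in by a BPF program via
 * bpf_perf_event_output(); only the leading header is mandated by perf. */
struct my_sample {
	struct perf_event_header header;
	__u32 size;
	char data[];
};

static enum bpf_perf_event_ret
handle_event(struct perf_event_header *hdr, void *private_data)
{
	/* Typed access instead of a blind void * assignment. */
	struct my_sample *s = container_of(hdr, struct my_sample, header);

	if (hdr->type == PERF_RECORD_SAMPLE)
		printf("sample, %u payload bytes\n", s->size);

	return LIBBPF_PERF_EVENT_CONT;	/* keep walking the ring */
}

/* Drain whatever is pending in one mmap'ed ring. ring_size is the size of
 * the data pages only (a power of two, not counting the metadata page).
 * copy_mem/copy_size are scratch space that libbpf grows as needed for
 * records that wrap around the end of the ring, so they persist here. */
static int drain_ring(void *ring_mem, size_t ring_size, size_t page_size)
{
	static void *copy_mem;
	static size_t copy_size;
	enum bpf_perf_event_ret ret;

	ret = bpf_perf_event_read_simple(ring_mem, ring_size, page_size,
					 &copy_mem, &copy_size,
					 handle_event, NULL);
	return ret == LIBBPF_PERF_EVENT_ERROR ? -1 : 0;
}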
@@ -50,15 +50,17 @@ static void int_exit(int signo)
 	stop = true;
 }
 
-static enum bpf_perf_event_ret print_bpf_output(void *event, void *priv)
+static enum bpf_perf_event_ret
+print_bpf_output(struct perf_event_header *event, void *private_data)
 {
-	struct event_ring_info *ring = priv;
-	struct perf_event_sample *e = event;
+	struct perf_event_sample *e = container_of(event, struct perf_event_sample,
+						   header);
+	struct event_ring_info *ring = private_data;
 	struct {
 		struct perf_event_header header;
 		__u64 id;
 		__u64 lost;
-	} *lost = event;
+	} *lost = (typeof(lost))event;
 
 	if (json_output) {
 		jsonw_start_object(json_wtr);
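The container_of() used above is the usual kernel helper (a definition along these lines ships with the tree's tools/include headers); it is shown here only as a sketch, not copied from this patch, to spell out why the typed event pointer buys a compile-time check: the typeof-based temporary makes the compiler warn when the pointer passed in does not match the type of the named member.

#include <stddef.h>

#define container_of(ptr, type, member) ({				\
	const typeof(((type *)0)->member) *__mptr = (ptr);		\
	(type *)((char *)__mptr - offsetof(type, member)); })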
@@ -2415,56 +2415,47 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
 }
 
 enum bpf_perf_event_ret
-bpf_perf_event_read_simple(void *mem, unsigned long size,
-			   unsigned long page_size, void **buf, size_t *buf_len,
-			   bpf_perf_event_print_t fn, void *priv)
+bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
+			   void **copy_mem, size_t *copy_size,
+			   bpf_perf_event_print_t fn, void *private_data)
 {
-	struct perf_event_mmap_page *header = mem;
+	struct perf_event_mmap_page *header = mmap_mem;
 	__u64 data_head = ring_buffer_read_head(header);
 	__u64 data_tail = header->data_tail;
-	int ret = LIBBPF_PERF_EVENT_ERROR;
-	void *base, *begin, *end;
-
-	if (data_head == data_tail)
-		return LIBBPF_PERF_EVENT_CONT;
-
-	base = ((char *)header) + page_size;
+	void *base = ((__u8 *)header) + page_size;
+	int ret = LIBBPF_PERF_EVENT_CONT;
+	struct perf_event_header *ehdr;
+	size_t ehdr_size;
 
-	begin = base + data_tail % size;
-	end = base + data_head % size;
+	while (data_head != data_tail) {
+		ehdr = base + (data_tail & (mmap_size - 1));
+		ehdr_size = ehdr->size;
 
-	while (begin != end) {
-		struct perf_event_header *ehdr;
-
-		ehdr = begin;
-		if (begin + ehdr->size > base + size) {
-			long len = base + size - begin;
+		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
+			void *copy_start = ehdr;
+			size_t len_first = base + mmap_size - copy_start;
+			size_t len_secnd = ehdr_size - len_first;
 
-			if (*buf_len < ehdr->size) {
-				free(*buf);
-				*buf = malloc(ehdr->size);
-				if (!*buf) {
+			if (*copy_size < ehdr_size) {
+				free(*copy_mem);
+				*copy_mem = malloc(ehdr_size);
+				if (!*copy_mem) {
+					*copy_size = 0;
 					ret = LIBBPF_PERF_EVENT_ERROR;
 					break;
 				}
-				*buf_len = ehdr->size;
+				*copy_size = ehdr_size;
 			}
 
-			memcpy(*buf, begin, len);
-			memcpy(*buf + len, base, ehdr->size - len);
-			ehdr = (void *)*buf;
-			begin = base + ehdr->size - len;
-		} else if (begin + ehdr->size == base + size) {
-			begin = base;
-		} else {
-			begin += ehdr->size;
+			memcpy(*copy_mem, copy_start, len_first);
+			memcpy(*copy_mem + len_first, base, len_secnd);
+			ehdr = *copy_mem;
 		}
 
-		ret = fn(ehdr, priv);
+		ret = fn(ehdr, private_data);
+		data_tail += ehdr_size;
 		if (ret != LIBBPF_PERF_EVENT_CONT)
 			break;
-
-		data_tail += ehdr->size;
 	}
 
 	ring_buffer_write_tail(header, data_tail);
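Two behavioural details of the rewritten walk are worth spelling out. First, data_tail += ehdr_size now happens before the LIBBPF_PERF_EVENT_CONT check, so a record the callback has already seen is consumed even when the callback asks to stop. Second, indexing switches from modulo to masking with (mmap_size - 1); for a power-of-two ring these give the same offset, including once the free-running 64-bit tail counter has moved past one ring's worth of data. The throwaway check below (not part of the patch; the size and the step value are arbitrary) illustrates the equivalence.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint64_t size = 4096;	/* data area size, a power of two */
	uint64_t tail;

	/* Exercise tail values well past one ring's worth of data. */
	for (tail = 0; tail < 8 * size; tail += 24)
		assert((tail & (size - 1)) == (tail % size));

	return 0;
}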
@@ -297,13 +297,14 @@ enum bpf_perf_event_ret {
 	LIBBPF_PERF_EVENT_CONT = -2,
 };
 
-typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(void *event,
-							   void *priv);
-LIBBPF_API int bpf_perf_event_read_simple(void *mem, unsigned long size,
-					  unsigned long page_size,
-					  void **buf, size_t *buf_len,
-					  bpf_perf_event_print_t fn,
-					  void *priv);
+struct perf_event_header;
+typedef enum bpf_perf_event_ret
+	(*bpf_perf_event_print_t)(struct perf_event_header *hdr,
+				  void *private_data);
+LIBBPF_API enum bpf_perf_event_ret
+bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
+			   void **copy_mem, size_t *copy_size,
+			   bpf_perf_event_print_t fn, void *private_data);
 
 struct nlattr;
 typedef int (*libbpf_dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb);
@@ -125,10 +125,11 @@ struct perf_event_sample {
 	char data[];
 };
 
-static enum bpf_perf_event_ret bpf_perf_event_print(void *event, void *priv)
+static enum bpf_perf_event_ret
+bpf_perf_event_print(struct perf_event_header *hdr, void *private_data)
 {
-	struct perf_event_sample *e = event;
-	perf_event_print_fn fn = priv;
+	struct perf_event_sample *e = (struct perf_event_sample *)hdr;
+	perf_event_print_fn fn = private_data;
 	int ret;
 
 	if (e->header.type == PERF_RECORD_SAMPLE) {