Merge branch 'bpf-verifier-log-rotation'
Andrii Nakryiko says:

====================

This patch set changes the BPF verifier log to behave as a rotating log by default. If the user-supplied log buffer is big enough to contain the entire verifier log output, there is no effective difference. But where previously a user who supplied too small a log buffer would get an -ENOSPC error and the beginning of the verifier log, now there will be no error and the user will get the ending part of the verifier log, filling up the user-supplied buffer. This is, in the absolute majority of cases, exactly what's useful, relevant, and what users want and need, as the end of the verifier log contains the details of the verifier failure and the relevant state that led to it. This rotating mode is therefore made the default, but for some niche advanced debugging scenarios it's possible to request the old behavior by specifying the additional BPF_LOG_FIXED (8) flag.

This patch set adjusts libbpf to allow specifying flags beyond 1 | 2 | 4. We also add --log-size and --log-fixed options to veristat, both to test this functionality manually and for use in various debugging scenarios. We also add selftests that try many variants of log buffer size to stress-test the correctness of the internal verifier log bookkeeping code.

Further, this patch set is merged with the log_size_actual v1 patch set ([0]), which adds the ability to get the log buffer size required to fit the entire verifier log output. This addresses a long-standing limitation that forces users and BPF loader library writers to guess and pre-size the log buffer, often allocating unnecessary extra memory or doing extra program verifications just to size logs better, ultimately wasting resources. This was requested most recently by the Go BPF library maintainers ([1]).

See the respective patches for details. A bunch of them are drive-by fixes detected while working on the code. Some others further refactor and compartmentalize the verifier log handling code into kernel/bpf/log.c, which should also make it simpler to integrate such verbose logging for other complicated bpf() syscall commands, if necessary. The rest is the actual logic to calculate the maximum log buffer size needed and return it to user space. A few patches wire this up on the libbpf side, and the rest add selftests to check proper log truncation and log_buf==NULL handling.

This turned into a pretty sizable patch set with lots of arithmetic, but hopefully the set of features added to the verifier log here is both useful for BPF users and self-contained and isolated enough to not cause trouble going forward.

v3->v4:
  - s/log_size_actual/log_true_size/ (Alexei);
  - log_buf==NULL && log_size==0 don't trigger -ENOSPC (Lorenz);
  - added WARN_ON_ONCE if we try to move bpf_vlog_reset() forward (Lorenz);
  - added selftests for truncation in BPF_LOG_FIXED mode;
  - fixed edge case in BPF_LOG_FIXED mode when log_size==1, leaving buf not zero-terminated;

v2->v3:
  - typos and comment improvements (Lorenz);
  - merged with log_size_actual v1 ([0]) patch set (Alexei);
  - log_buf==NULL condition allowed (Lorenz);
  - added BPF_BTF_LOAD logs tests (Lorenz);
  - more cleanup and refactoring of the internal verifier log API;

v1->v2:
  - return -ENOSPC even in rotating log mode to preserve backwards compatibility (Lorenz);

  [0] https://patchwork.kernel.org/project/netdevbpf/list/?series=735213&state=*
  [1] https://lore.kernel.org/bpf/CAN+4W8iNoEbQzQVbB_o1W0MWBDV4xCJAq7K3f6psVE-kkCfMqg@mail.gmail.com/

====================

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
commit 255f0e14b9
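To make the log_true_size workflow described above concrete, here is a minimal user-space sketch (not part of the patch set; the XDP program type, "probe" name, and insns placeholder are illustrative). It relies only on behavior confirmed by the changelog: log_buf==NULL with log_size==0 is allowed, and log_true_size is filled in for both successful and failed loads:

#include <stdlib.h>
#include <unistd.h>
#include <bpf/bpf.h>

/* Returns a malloc'ed, exactly-sized, zero-terminated verifier log for the
 * given program, or NULL. Assumes a libbpf that carries this patch set.
 */
static char *fetch_verifier_log(const struct bpf_insn *insns, size_t insn_cnt)
{
    LIBBPF_OPTS(bpf_prog_load_opts, opts, .log_level = 1);
    char *buf;
    int fd;

    /* pass 1: NULL buffer is now allowed; kernel reports the required size */
    fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "probe", "GPL", insns, insn_cnt, &opts);
    if (fd >= 0)
        close(fd);
    if (opts.log_true_size == 0) /* kernel without this feature */
        return NULL;

    buf = malloc(opts.log_true_size);
    if (!buf)
        return NULL;

    /* pass 2: buffer of exactly log_true_size bytes, so no -ENOSPC possible */
    opts.log_buf = buf;
    opts.log_size = opts.log_true_size;
    fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "probe", "GPL", insns, insn_cnt, &opts);
    if (fd >= 0)
        close(fd);
    return buf;
}

Adding `| 8` (BPF_LOG_FIXED) to log_level in the sketch above would restore the old fixed-position behavior instead of the rotating default.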
include/linux/bpf.h
@@ -2175,7 +2175,7 @@ int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
                              size_t actual_size);

 /* verify correctness of eBPF program */
-int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr);
+int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size);

 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
include/linux/bpf_verifier.h
@@ -491,39 +491,36 @@ struct bpf_insn_aux_data {
 #define BPF_VERIFIER_TMP_LOG_SIZE   1024

 struct bpf_verifier_log {
-    u32 level;
-    char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
+    /* Logical start and end positions of a "log window" of the verifier log.
+     * start_pos == 0 means we haven't truncated anything.
+     * Once truncation starts to happen, start_pos + len_total == end_pos,
+     * except during log reset situations, in which (end_pos - start_pos)
+     * might get smaller than len_total (see bpf_vlog_reset()).
+     * Generally, (end_pos - start_pos) gives the number of useful bytes in
+     * the user log buffer.
+     */
+    u64 start_pos;
+    u64 end_pos;
     char __user *ubuf;
-    u32 len_used;
+    u32 level;
     u32 len_total;
+    u32 len_max;
+    char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
 };

-static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
-{
-    return log->len_used >= log->len_total - 1;
-}
-
 #define BPF_LOG_LEVEL1  1
 #define BPF_LOG_LEVEL2  2
 #define BPF_LOG_STATS   4
+#define BPF_LOG_FIXED   8
 #define BPF_LOG_LEVEL   (BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
-#define BPF_LOG_MASK    (BPF_LOG_LEVEL | BPF_LOG_STATS)
+#define BPF_LOG_MASK    (BPF_LOG_LEVEL | BPF_LOG_STATS | BPF_LOG_FIXED)
 #define BPF_LOG_KERNEL  (BPF_LOG_MASK + 1) /* kernel internal flag */
 #define BPF_LOG_MIN_ALIGNMENT 8U
 #define BPF_LOG_ALIGNMENT 40U

 static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
 {
-    return log &&
-        ((log->level && log->ubuf && !bpf_verifier_log_full(log)) ||
-         log->level == BPF_LOG_KERNEL);
-}
-
-static inline bool
-bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log)
-{
-    return log->len_total >= 128 && log->len_total <= UINT_MAX >> 2 &&
-           log->level && log->ubuf && !(log->level & ~BPF_LOG_MASK);
+    return log && log->level;
 }

 #define BPF_MAX_SUBPROGS 256
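As a concrete illustration of the "log window" invariant documented in the struct comment above (a stand-alone user-space sketch, not kernel code; the sizes are made up): with len_total == 8, appending 13 bytes leaves start_pos == 5 and end_pos == 13, so the buffer holds exactly the newest len_total bytes and start_pos + len_total == end_pos.

#include <assert.h>

int main(void)
{
    unsigned long long start_pos = 0, end_pos = 0;
    const unsigned int len_total = 8; /* user log buffer size */
    int i;

    /* append 13 one-byte writes; rotate once the window exceeds the buffer */
    for (i = 0; i < 13; i++) {
        end_pos += 1;
        if (end_pos - start_pos >= len_total)
            start_pos = end_pos - len_total;
    }
    assert(start_pos == 5 && end_pos == 13);
    /* once truncation has happened: start_pos + len_total == end_pos */
    assert(start_pos + len_total == end_pos);
    /* the buffer holds logical positions [5, 13), i.e. the newest bytes */
    return 0;
}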
@@ -603,7 +600,7 @@ struct bpf_verifier_env {
     u32 scratched_regs;
     /* Same as scratched_regs but for stack slots */
     u64 scratched_stack_slots;
-    u32 prev_log_len, prev_insn_print_len;
+    u64 prev_log_pos, prev_insn_print_pos;
     /* buffer used in reg_type_str() to generate reg_type string */
     char type_str_buf[TYPE_STR_BUF_LEN];
 };
@@ -614,6 +611,10 @@ __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
                                            const char *fmt, ...);
 __printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
                             const char *fmt, ...);
+int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
+                  char __user *log_buf, u32 log_size);
+void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos);
+int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual);

 static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
 {
include/linux/btf.h
@@ -125,7 +125,7 @@ extern const struct file_operations btf_fops;

 void btf_get(struct btf *btf);
 void btf_put(struct btf *btf);
-int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr);
+int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_sz);
 struct btf *btf_get_by_fd(int fd);
 int btf_get_info_by_fd(const struct btf *btf,
                        const union bpf_attr *attr,
include/uapi/linux/bpf.h
@@ -1407,6 +1407,11 @@ union bpf_attr {
         __aligned_u64   fd_array;   /* array of FDs */
         __aligned_u64   core_relos;
         __u32           core_relo_rec_size; /* sizeof(struct bpf_core_relo) */
+        /* output: actual total log contents size (including terminating zero).
+         * It could be both larger than original log_size (if log was
+         * truncated), or smaller (if log buffer wasn't filled completely).
+         */
+        __u32           log_true_size;
     };

     struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -1492,6 +1497,11 @@ union bpf_attr {
         __u32           btf_size;
         __u32           btf_log_size;
         __u32           btf_log_level;
+        /* output: actual total log contents size (including terminating zero).
+         * It could be both larger than original log_size (if log was
+         * truncated), or smaller (if log buffer wasn't filled completely).
+         */
+        __u32           btf_log_true_size;
     };

     struct {
kernel/bpf/Makefile
@@ -6,7 +6,8 @@ cflags-nogcse-$(CONFIG_X86)$(CONFIG_CC_IS_GCC) := -fno-gcse
 endif
 CFLAGS_core.o += $(call cc-disable-warning, override-init) $(cflags-nogcse-yy)

-obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o bpf_iter.o map_iter.o task_iter.o prog_iter.o link_iter.o
+obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o log.o
+obj-$(CONFIG_BPF_SYSCALL) += bpf_iter.o map_iter.o task_iter.o prog_iter.o link_iter.o
 obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o bloom_filter.o
 obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o
 obj-$(CONFIG_BPF_SYSCALL) += bpf_local_storage.o bpf_task_storage.o
kernel/bpf/btf.c
@@ -5504,38 +5504,45 @@ static int btf_check_type_tags(struct btf_verifier_env *env,
     return 0;
 }

-static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size,
-                             u32 log_level, char __user *log_ubuf, u32 log_size)
+static int finalize_log(struct bpf_verifier_log *log, bpfptr_t uattr, u32 uattr_size)
 {
-    struct btf_struct_metas *struct_meta_tab;
-    struct btf_verifier_env *env = NULL;
-    struct bpf_verifier_log *log;
-    struct btf *btf = NULL;
-    u8 *data;
+    u32 log_true_size;
     int err;

-    if (btf_data_size > BTF_MAX_SIZE)
+    err = bpf_vlog_finalize(log, &log_true_size);
+
+    if (uattr_size >= offsetofend(union bpf_attr, btf_log_true_size) &&
+        copy_to_bpfptr_offset(uattr, offsetof(union bpf_attr, btf_log_true_size),
+                              &log_true_size, sizeof(log_true_size)))
+        err = -EFAULT;
+
+    return err;
+}
+
+static struct btf *btf_parse(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
+{
+    bpfptr_t btf_data = make_bpfptr(attr->btf, uattr.is_kernel);
+    char __user *log_ubuf = u64_to_user_ptr(attr->btf_log_buf);
+    struct btf_struct_metas *struct_meta_tab;
+    struct btf_verifier_env *env = NULL;
+    struct btf *btf = NULL;
+    u8 *data;
+    int err, ret;
+
+    if (attr->btf_size > BTF_MAX_SIZE)
         return ERR_PTR(-E2BIG);

     env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
     if (!env)
         return ERR_PTR(-ENOMEM);

-    log = &env->log;
-    if (log_level || log_ubuf || log_size) {
-        /* user requested verbose verifier output
-         * and supplied buffer to store the verification trace
-         */
-        log->level = log_level;
-        log->ubuf = log_ubuf;
-        log->len_total = log_size;
-
-        /* log attributes have to be sane */
-        if (!bpf_verifier_log_attr_valid(log)) {
-            err = -EINVAL;
-            goto errout;
-        }
-    }
+    /* user could have requested verbose verifier output
+     * and supplied buffer to store the verification trace
+     */
+    err = bpf_vlog_init(&env->log, attr->btf_log_level,
+                        log_ubuf, attr->btf_log_size);
+    if (err)
+        goto errout_free;

     btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
     if (!btf) {
@@ -5544,16 +5551,16 @@ static struct btf *btf_parse(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
     }
     env->btf = btf;

-    data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
+    data = kvmalloc(attr->btf_size, GFP_KERNEL | __GFP_NOWARN);
     if (!data) {
         err = -ENOMEM;
         goto errout;
     }

     btf->data = data;
-    btf->data_size = btf_data_size;
+    btf->data_size = attr->btf_size;

-    if (copy_from_bpfptr(data, btf_data, btf_data_size)) {
+    if (copy_from_bpfptr(data, btf_data, attr->btf_size)) {
         err = -EFAULT;
         goto errout;
     }
@@ -5576,7 +5583,7 @@ static struct btf *btf_parse(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
     if (err)
         goto errout;

-    struct_meta_tab = btf_parse_struct_metas(log, btf);
+    struct_meta_tab = btf_parse_struct_metas(&env->log, btf);
     if (IS_ERR(struct_meta_tab)) {
         err = PTR_ERR(struct_meta_tab);
         goto errout;
@@ -5593,10 +5600,9 @@ static struct btf *btf_parse(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
         }
     }

-    if (log->level && bpf_verifier_log_full(log)) {
-        err = -ENOSPC;
-        goto errout_meta;
-    }
+    err = finalize_log(&env->log, uattr, uattr_size);
+    if (err)
+        goto errout_free;

     btf_verifier_env_free(env);
     refcount_set(&btf->refcnt, 1);
@@ -5605,6 +5611,11 @@ static struct btf *btf_parse(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
 errout_meta:
     btf_free_struct_meta_tab(btf);
 errout:
+    /* overwrite err with -ENOSPC or -EFAULT */
+    ret = finalize_log(&env->log, uattr, uattr_size);
+    if (ret)
+        err = ret;
+errout_free:
     btf_verifier_env_free(env);
     if (btf)
         btf_free(btf);
@@ -7213,15 +7224,12 @@ static int __btf_new_fd(struct btf *btf)
     return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
 }

-int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr)
+int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
 {
     struct btf *btf;
     int ret;

-    btf = btf_parse(make_bpfptr(attr->btf, uattr.is_kernel),
-                    attr->btf_size, attr->btf_log_level,
-                    u64_to_user_ptr(attr->btf_log_buf),
-                    attr->btf_log_size);
+    btf = btf_parse(attr, uattr, uattr_size);
     if (IS_ERR(btf))
         return PTR_ERR(btf);

kernel/bpf/log.c (new file, 330 lines)
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ * Copyright (c) 2016 Facebook
+ * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
+ */
+#include <uapi/linux/btf.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bpf.h>
+#include <linux/bpf_verifier.h>
+#include <linux/math64.h>
+
+static bool bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log)
+{
+    /* ubuf and len_total should both be specified (or not) together */
+    if (!!log->ubuf != !!log->len_total)
+        return false;
+    /* log buf without log_level is meaningless */
+    if (log->ubuf && log->level == 0)
+        return false;
+    if (log->level & ~BPF_LOG_MASK)
+        return false;
+    if (log->len_total > UINT_MAX >> 2)
+        return false;
+    return true;
+}
+
+int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
+                  char __user *log_buf, u32 log_size)
+{
+    log->level = log_level;
+    log->ubuf = log_buf;
+    log->len_total = log_size;
+
+    /* log attributes have to be sane */
+    if (!bpf_verifier_log_attr_valid(log))
+        return -EINVAL;
+
+    return 0;
+}
+
+static void bpf_vlog_update_len_max(struct bpf_verifier_log *log, u32 add_len)
+{
+    /* add_len includes terminal \0, so no need for +1. */
+    u64 len = log->end_pos + add_len;
+
+    /* log->len_max could be larger than our current len due to
+     * bpf_vlog_reset() calls, so we maintain the max of any length at any
+     * previous point
+     */
+    if (len > UINT_MAX)
+        log->len_max = UINT_MAX;
+    else if (len > log->len_max)
+        log->len_max = len;
+}
+
+void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
+                       va_list args)
+{
+    u64 cur_pos;
+    u32 new_n, n;
+
+    n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
+
+    WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
+              "verifier log line truncated - local buffer too short\n");
+
+    if (log->level == BPF_LOG_KERNEL) {
+        bool newline = n > 0 && log->kbuf[n - 1] == '\n';
+
+        pr_err("BPF: %s%s", log->kbuf, newline ? "" : "\n");
+        return;
+    }
+
+    n += 1; /* include terminating zero */
+    bpf_vlog_update_len_max(log, n);
+
+    if (log->level & BPF_LOG_FIXED) {
+        /* check if we have at least something to put into user buf */
+        new_n = 0;
+        if (log->end_pos < log->len_total) {
+            new_n = min_t(u32, log->len_total - log->end_pos, n);
+            log->kbuf[new_n - 1] = '\0';
+        }
+
+        cur_pos = log->end_pos;
+        log->end_pos += n - 1; /* don't count terminating '\0' */
+
+        if (log->ubuf && new_n &&
+            copy_to_user(log->ubuf + cur_pos, log->kbuf, new_n))
+            goto fail;
+    } else {
+        u64 new_end, new_start;
+        u32 buf_start, buf_end, new_n;
+
+        new_end = log->end_pos + n;
+        if (new_end - log->start_pos >= log->len_total)
+            new_start = new_end - log->len_total;
+        else
+            new_start = log->start_pos;
+
+        log->start_pos = new_start;
+        log->end_pos = new_end - 1; /* don't count terminating '\0' */
+
+        if (!log->ubuf)
+            return;
+
+        new_n = min(n, log->len_total);
+        cur_pos = new_end - new_n;
+        div_u64_rem(cur_pos, log->len_total, &buf_start);
+        div_u64_rem(new_end, log->len_total, &buf_end);
+        /* new_end and buf_end are exclusive indices, so if buf_end is
+         * exactly zero, then it actually points right to the end of
+         * ubuf and there is no wrap around
+         */
+        if (buf_end == 0)
+            buf_end = log->len_total;
+
+        /* if buf_start > buf_end, we wrapped around;
+         * if buf_start == buf_end, then we fill ubuf completely; we
+         * can't have buf_start == buf_end to mean that there is
+         * nothing to write, because we always write at least
+         * something, even if terminal '\0'
+         */
+        if (buf_start < buf_end) {
+            /* message fits within contiguous chunk of ubuf */
+            if (copy_to_user(log->ubuf + buf_start,
+                             log->kbuf + n - new_n,
+                             buf_end - buf_start))
+                goto fail;
+        } else {
+            /* message wraps around the end of ubuf, copy in two chunks */
+            if (copy_to_user(log->ubuf + buf_start,
+                             log->kbuf + n - new_n,
+                             log->len_total - buf_start))
+                goto fail;
+            if (copy_to_user(log->ubuf,
+                             log->kbuf + n - buf_end,
+                             buf_end))
+                goto fail;
+        }
+    }
+
+    return;
+fail:
+    log->ubuf = NULL;
+}
+
+void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos)
+{
+    char zero = 0;
+    u32 pos;
+
+    if (WARN_ON_ONCE(new_pos > log->end_pos))
+        return;
+
+    if (!bpf_verifier_log_needed(log) || log->level == BPF_LOG_KERNEL)
+        return;
+
+    /* if position to which we reset is beyond current log window,
+     * then we didn't preserve any useful content and should adjust
+     * start_pos to end up with an empty log (start_pos == end_pos)
+     */
+    log->end_pos = new_pos;
+    if (log->end_pos < log->start_pos)
+        log->start_pos = log->end_pos;
+
+    if (!log->ubuf)
+        return;
+
+    if (log->level & BPF_LOG_FIXED)
+        pos = log->end_pos + 1;
+    else
+        div_u64_rem(new_pos, log->len_total, &pos);
+
+    if (pos < log->len_total && put_user(zero, log->ubuf + pos))
+        log->ubuf = NULL;
+}
+
+static void bpf_vlog_reverse_kbuf(char *buf, int len)
+{
+    int i, j;
+
+    for (i = 0, j = len - 1; i < j; i++, j--)
+        swap(buf[i], buf[j]);
+}
+
+static int bpf_vlog_reverse_ubuf(struct bpf_verifier_log *log, int start, int end)
+{
+    /* we split log->kbuf into two equal parts for both ends of array */
+    int n = sizeof(log->kbuf) / 2, nn;
+    char *lbuf = log->kbuf, *rbuf = log->kbuf + n;
+
+    /* Read ubuf's section [start, end) two chunks at a time, from left
+     * and right side; within each chunk, swap all the bytes; after that
+     * reverse the order of lbuf and rbuf and write result back to ubuf.
+     * This way we'll end up with swapped contents of specified
+     * [start, end) ubuf segment.
+     */
+    while (end - start > 1) {
+        nn = min(n, (end - start) / 2);
+
+        if (copy_from_user(lbuf, log->ubuf + start, nn))
+            return -EFAULT;
+        if (copy_from_user(rbuf, log->ubuf + end - nn, nn))
+            return -EFAULT;
+
+        bpf_vlog_reverse_kbuf(lbuf, nn);
+        bpf_vlog_reverse_kbuf(rbuf, nn);
+
+        /* we write lbuf to the right end of ubuf, while rbuf to the
+         * left one to end up with properly reversed overall ubuf
+         */
+        if (copy_to_user(log->ubuf + start, rbuf, nn))
+            return -EFAULT;
+        if (copy_to_user(log->ubuf + end - nn, lbuf, nn))
+            return -EFAULT;
+
+        start += nn;
+        end -= nn;
+    }
+
+    return 0;
+}
+
+int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual)
+{
+    u32 sublen;
+    int err;
+
+    *log_size_actual = 0;
+    if (!log || log->level == 0 || log->level == BPF_LOG_KERNEL)
+        return 0;
+
+    if (!log->ubuf)
+        goto skip_log_rotate;
+    /* If we never truncated log, there is nothing to move around. */
+    if (log->start_pos == 0)
+        goto skip_log_rotate;
+
+    /* Otherwise we need to rotate log contents to make it start from the
+     * buffer beginning and be a continuous zero-terminated string. Note
+     * that if log->start_pos != 0 then we definitely filled up entire log
+     * buffer with no gaps, and we just need to shift buffer contents to
+     * the left by (log->start_pos % log->len_total) bytes.
+     *
+     * Unfortunately, user buffer could be huge and we don't want to
+     * allocate temporary kernel memory of the same size just to shift
+     * contents in a straightforward fashion. Instead, we'll be clever and
+     * do in-place array rotation. This is a leetcode-style problem, which
+     * could be solved by three rotations.
+     *
+     * Let's say we have log buffer that has to be shifted left by 7 bytes
+     * (spaces and vertical bar is just for demonstrative purposes):
+     *   E F G H I J K | A B C D
+     *
+     * First, we reverse entire array:
+     *   D C B A | K J I H G F E
+     *
+     * Then we rotate first 4 bytes (DCBA) and separately last 7 bytes
+     * (KJIHGFE), resulting in a properly rotated array:
+     *   A B C D | E F G H I J K
+     *
+     * We'll utilize log->kbuf to read user memory chunk by chunk, swap
+     * bytes, and write them back. Doing it byte-by-byte would be
+     * unnecessarily inefficient. Altogether we are going to read and
+     * write each byte twice, for total 4 memory copies between kernel and
+     * user space.
+     */
+
+    /* length of the chopped off part that will be the beginning;
+     * len(ABCD) in the example above
+     */
+    div_u64_rem(log->start_pos, log->len_total, &sublen);
+    sublen = log->len_total - sublen;
+
+    err = bpf_vlog_reverse_ubuf(log, 0, log->len_total);
+    err = err ?: bpf_vlog_reverse_ubuf(log, 0, sublen);
+    err = err ?: bpf_vlog_reverse_ubuf(log, sublen, log->len_total);
+    if (err)
+        log->ubuf = NULL;
+
+skip_log_rotate:
+    *log_size_actual = log->len_max;
+
+    /* properly initialized log has either both ubuf!=NULL and len_total>0
+     * or ubuf==NULL and len_total==0, so if this condition doesn't hold,
+     * we got a fault somewhere along the way, so report it back
+     */
+    if (!!log->ubuf != !!log->len_total)
+        return -EFAULT;
+
+    /* did truncation actually happen? */
+    if (log->ubuf && log->len_max > log->len_total)
+        return -ENOSPC;
+
+    return 0;
+}
+
+/* log_level controls verbosity level of eBPF verifier.
+ * bpf_verifier_log_write() is used to dump the verification trace to the log,
+ * so the user can figure out what's wrong with the program
+ */
+__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
+                                           const char *fmt, ...)
+{
+    va_list args;
+
+    if (!bpf_verifier_log_needed(&env->log))
+        return;
+
+    va_start(args, fmt);
+    bpf_verifier_vlog(&env->log, fmt, args);
+    va_end(args);
+}
+EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
+
+__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
+                            const char *fmt, ...)
+{
+    va_list args;
+
+    if (!bpf_verifier_log_needed(log))
+        return;
+
+    va_start(args, fmt);
+    bpf_verifier_vlog(log, fmt, args);
+    va_end(args);
+}
+EXPORT_SYMBOL_GPL(bpf_log);
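The three-reversal rotation that bpf_vlog_finalize() documents above can be tried in isolation. A stand-alone user-space sketch (plain in-memory buffer instead of copy_{from,to}_user(), byte-at-a-time instead of chunked, using the same EFGHIJK|ABCD example as the kernel comment):

#include <assert.h>
#include <string.h>

static void reverse(char *buf, int len)
{
    int i, j;

    for (i = 0, j = len - 1; i < j; i++, j--) {
        char tmp = buf[i];
        buf[i] = buf[j];
        buf[j] = tmp;
    }
}

int main(void)
{
    /* physically "EFGHIJK|ABCD": the logical start sits 7 bytes in,
     * so the buffer must be shifted left by 7 bytes
     */
    char buf[] = "EFGHIJKABCD";
    int len = (int)strlen(buf);
    int sublen = 4; /* len("ABCD"), the chopped-off head */

    reverse(buf, len);                   /* DCBA|KJIHGFE */
    reverse(buf, sublen);                /* ABCD|KJIHGFE */
    reverse(buf + sublen, len - sublen); /* ABCD|EFGHIJK */

    assert(strcmp(buf, "ABCDEFGHIJK") == 0);
    return 0;
}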
kernel/bpf/syscall.c
@@ -2501,9 +2501,9 @@ static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
 }

 /* last field in 'union bpf_attr' used by this command */
-#define BPF_PROG_LOAD_LAST_FIELD core_relo_rec_size
+#define BPF_PROG_LOAD_LAST_FIELD log_true_size

-static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr)
+static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
 {
     enum bpf_prog_type type = attr->prog_type;
     struct bpf_prog *prog, *dst_prog = NULL;
@@ -2653,7 +2653,7 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
         goto free_prog_sec;

     /* run eBPF verifier */
-    err = bpf_check(&prog, attr, uattr);
+    err = bpf_check(&prog, attr, uattr, uattr_size);
     if (err < 0)
         goto free_used_maps;

@@ -4371,9 +4371,9 @@ static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
     return err;
 }

-#define BPF_BTF_LOAD_LAST_FIELD btf_log_level
+#define BPF_BTF_LOAD_LAST_FIELD btf_log_true_size

-static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr)
+static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
 {
     if (CHECK_ATTR(BPF_BTF_LOAD))
         return -EINVAL;
@@ -4381,7 +4381,7 @@ static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
     if (!bpf_capable())
         return -EPERM;

-    return btf_new_fd(attr, uattr);
+    return btf_new_fd(attr, uattr, uattr_size);
 }

 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
@@ -5059,7 +5059,7 @@ static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
         err = map_freeze(&attr);
         break;
     case BPF_PROG_LOAD:
-        err = bpf_prog_load(&attr, uattr);
+        err = bpf_prog_load(&attr, uattr, size);
         break;
     case BPF_OBJ_PIN:
         err = bpf_obj_pin(&attr);
@@ -5104,7 +5104,7 @@ static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
         err = bpf_raw_tracepoint_open(&attr);
         break;
     case BPF_BTF_LOAD:
-        err = bpf_btf_load(&attr, uattr);
+        err = bpf_btf_load(&attr, uattr, size);
         break;
     case BPF_BTF_GET_FD_BY_ID:
         err = bpf_btf_get_fd_by_id(&attr);
kernel/bpf/verifier.c
@@ -335,61 +335,6 @@ find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
     return &linfo[i - 1];
 }

-void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
-                       va_list args)
-{
-    unsigned int n;
-
-    n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
-
-    WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
-              "verifier log line truncated - local buffer too short\n");
-
-    if (log->level == BPF_LOG_KERNEL) {
-        bool newline = n > 0 && log->kbuf[n - 1] == '\n';
-
-        pr_err("BPF: %s%s", log->kbuf, newline ? "" : "\n");
-        return;
-    }
-
-    n = min(log->len_total - log->len_used - 1, n);
-    log->kbuf[n] = '\0';
-    if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
-        log->len_used += n;
-    else
-        log->ubuf = NULL;
-}
-
-static void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos)
-{
-    char zero = 0;
-
-    if (!bpf_verifier_log_needed(log))
-        return;
-
-    log->len_used = new_pos;
-    if (put_user(zero, log->ubuf + new_pos))
-        log->ubuf = NULL;
-}
-
-/* log_level controls verbosity level of eBPF verifier.
- * bpf_verifier_log_write() is used to dump the verification trace to the log,
- * so the user can figure out what's wrong with the program
- */
-__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
-                                           const char *fmt, ...)
-{
-    va_list args;
-
-    if (!bpf_verifier_log_needed(&env->log))
-        return;
-
-    va_start(args, fmt);
-    bpf_verifier_vlog(&env->log, fmt, args);
-    va_end(args);
-}
-EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
-
 __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
 {
     struct bpf_verifier_env *env = private_data;
@@ -403,20 +348,6 @@ __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
     va_end(args);
 }

-__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
-                            const char *fmt, ...)
-{
-    va_list args;
-
-    if (!bpf_verifier_log_needed(log))
-        return;
-
-    va_start(args, fmt);
-    bpf_verifier_vlog(log, fmt, args);
-    va_end(args);
-}
-EXPORT_SYMBOL_GPL(bpf_log);
-
 static const char *ltrim(const char *s)
 {
     while (isspace(*s))
@@ -1508,10 +1439,10 @@ static inline u32 vlog_alignment(u32 pos)
 static void print_insn_state(struct bpf_verifier_env *env,
                              const struct bpf_func_state *state)
 {
-    if (env->prev_log_len && env->prev_log_len == env->log.len_used) {
+    if (env->prev_log_pos && env->prev_log_pos == env->log.end_pos) {
         /* remove new line character */
-        bpf_vlog_reset(&env->log, env->prev_log_len - 1);
-        verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_len), ' ');
+        bpf_vlog_reset(&env->log, env->prev_log_pos - 1);
+        verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_pos), ' ');
     } else {
         verbose(env, "%d:", env->insn_idx);
     }
@@ -1819,7 +1750,7 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
     elem->insn_idx = insn_idx;
     elem->prev_insn_idx = prev_insn_idx;
     elem->next = env->head;
-    elem->log_pos = env->log.len_used;
+    elem->log_pos = env->log.end_pos;
     env->head = elem;
     env->stack_size++;
     err = copy_verifier_state(&elem->st, cur);
@@ -2355,7 +2286,7 @@ static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
     elem->insn_idx = insn_idx;
     elem->prev_insn_idx = prev_insn_idx;
     elem->next = env->head;
-    elem->log_pos = env->log.len_used;
+    elem->log_pos = env->log.end_pos;
     env->head = elem;
     env->stack_size++;
     if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
@@ -15707,11 +15638,11 @@ static int do_check(struct bpf_verifier_env *env)
             print_insn_state(env, state->frame[state->curframe]);

             verbose_linfo(env, env->insn_idx, "; ");
-            env->prev_log_len = env->log.len_used;
+            env->prev_log_pos = env->log.end_pos;
             verbose(env, "%d: ", env->insn_idx);
             print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
-            env->prev_insn_print_len = env->log.len_used - env->prev_log_len;
-            env->prev_log_len = env->log.len_used;
+            env->prev_insn_print_pos = env->log.end_pos - env->prev_log_pos;
+            env->prev_log_pos = env->log.end_pos;
         }

         if (bpf_prog_is_offloaded(env->prog->aux)) {
@@ -18763,12 +18694,12 @@ struct btf *bpf_get_btf_vmlinux(void)
     return btf_vmlinux;
 }

-int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr)
+int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
 {
     u64 start_time = ktime_get_ns();
     struct bpf_verifier_env *env;
-    struct bpf_verifier_log *log;
-    int i, len, ret = -EINVAL;
+    int i, len, ret = -EINVAL, err;
+    u32 log_true_size;
     bool is_priv;

     /* no program is valid */
@@ -18781,7 +18712,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
     env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
     if (!env)
         return -ENOMEM;
-    log = &env->log;

     len = (*prog)->len;
     env->insn_aux_data =
@@ -18802,20 +18732,14 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
     if (!is_priv)
         mutex_lock(&bpf_verifier_lock);

-    if (attr->log_level || attr->log_buf || attr->log_size) {
-        /* user requested verbose verifier output
-         * and supplied buffer to store the verification trace
-         */
-        log->level = attr->log_level;
-        log->ubuf = (char __user *) (unsigned long) attr->log_buf;
-        log->len_total = attr->log_size;
-
-        /* log attributes have to be sane */
-        if (!bpf_verifier_log_attr_valid(log)) {
-            ret = -EINVAL;
-            goto err_unlock;
-        }
-    }
+    /* user could have requested verbose verifier output
+     * and supplied buffer to store the verification trace
+     */
+    ret = bpf_vlog_init(&env->log, attr->log_level,
+                        (char __user *) (unsigned long) attr->log_buf,
+                        attr->log_size);
+    if (ret)
+        goto err_unlock;

     mark_verifier_state_clean(env);

@@ -18929,9 +18853,14 @@ skip_full_check:
     print_verification_stats(env);
     env->prog->aux->verified_insns = env->insn_processed;

-    if (log->level && bpf_verifier_log_full(log))
-        ret = -ENOSPC;
-    if (log->level && !log->ubuf) {
+    /* preserve original error even if log finalization is successful */
+    err = bpf_vlog_finalize(&env->log, &log_true_size);
+    if (err)
+        ret = err;
+
+    if (uattr_size >= offsetofend(union bpf_attr, log_true_size) &&
+        copy_to_bpfptr_offset(uattr, offsetof(union bpf_attr, log_true_size),
+                              &log_true_size, sizeof(log_true_size))) {
         ret = -EFAULT;
         goto err_release_maps;
     }
tools/include/uapi/linux/bpf.h
@@ -1407,6 +1407,11 @@ union bpf_attr {
         __aligned_u64   fd_array;   /* array of FDs */
         __aligned_u64   core_relos;
         __u32           core_relo_rec_size; /* sizeof(struct bpf_core_relo) */
+        /* output: actual total log contents size (including terminating zero).
+         * It could be both larger than original log_size (if log was
+         * truncated), or smaller (if log buffer wasn't filled completely).
+         */
+        __u32           log_true_size;
     };

     struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -1492,6 +1497,11 @@ union bpf_attr {
         __u32           btf_size;
         __u32           btf_log_size;
         __u32           btf_log_level;
+        /* output: actual total log contents size (including terminating zero).
+         * It could be both larger than original log_size (if log was
+         * truncated), or smaller (if log buffer wasn't filled completely).
+         */
+        __u32           btf_log_true_size;
     };

     struct {
@@ -1513,7 +1523,7 @@ union bpf_attr {
     struct { /* struct used by BPF_LINK_CREATE command */
         union {
             __u32       prog_fd;    /* eBPF program to attach */
-            __u32       map_fd;     /* eBPF struct_ops to attach */
+            __u32       map_fd;     /* struct_ops to attach */
         };
         union {
             __u32       target_fd;  /* object to attach to */
tools/lib/bpf/bpf.c
@@ -230,9 +230,9 @@ alloc_zero_tailing_info(const void *orecord, __u32 cnt,
 int bpf_prog_load(enum bpf_prog_type prog_type,
                   const char *prog_name, const char *license,
                   const struct bpf_insn *insns, size_t insn_cnt,
-                  const struct bpf_prog_load_opts *opts)
+                  struct bpf_prog_load_opts *opts)
 {
-    const size_t attr_sz = offsetofend(union bpf_attr, fd_array);
+    const size_t attr_sz = offsetofend(union bpf_attr, log_true_size);
     void *finfo = NULL, *linfo = NULL;
     const char *func_info, *line_info;
     __u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
@@ -290,10 +290,6 @@ int bpf_prog_load(enum bpf_prog_type prog_type,

     if (!!log_buf != !!log_size)
         return libbpf_err(-EINVAL);
-    if (log_level > (4 | 2 | 1))
-        return libbpf_err(-EINVAL);
-    if (log_level && !log_buf)
-        return libbpf_err(-EINVAL);

     func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0);
     func_info = OPTS_GET(opts, func_info, NULL);
@@ -316,6 +312,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
     }

     fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
+    OPTS_SET(opts, log_true_size, attr.log_true_size);
     if (fd >= 0)
         return fd;

@@ -356,6 +353,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
         }

         fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
+        OPTS_SET(opts, log_true_size, attr.log_true_size);
         if (fd >= 0)
             goto done;
     }
@@ -370,6 +368,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
         attr.log_level = 1;

         fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
+        OPTS_SET(opts, log_true_size, attr.log_true_size);
     }
 done:
     /* free() doesn't affect errno, so we don't need to restore it */
@@ -1084,9 +1083,9 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd)
     return libbpf_err_errno(fd);
 }

-int bpf_btf_load(const void *btf_data, size_t btf_size, const struct bpf_btf_load_opts *opts)
+int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts *opts)
 {
-    const size_t attr_sz = offsetofend(union bpf_attr, btf_log_level);
+    const size_t attr_sz = offsetofend(union bpf_attr, btf_log_true_size);
     union bpf_attr attr;
     char *log_buf;
     size_t log_size;
@@ -1129,6 +1128,8 @@ int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts *opts)
         attr.btf_log_level = 1;
         fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
     }
+
+    OPTS_SET(opts, log_true_size, attr.btf_log_true_size);
     return libbpf_err_errno(fd);
 }
tools/lib/bpf/bpf.h
@@ -96,13 +96,20 @@ struct bpf_prog_load_opts {
     __u32 log_level;
     __u32 log_size;
     char *log_buf;
+    /* output: actual total log contents size (including terminating zero).
+     * It could be both larger than original log_size (if log was
+     * truncated), or smaller (if log buffer wasn't filled completely).
+     * If kernel doesn't support this feature, log_size is left unchanged.
+     */
+    __u32 log_true_size;
     size_t :0;
 };
-#define bpf_prog_load_opts__last_field log_buf
+#define bpf_prog_load_opts__last_field log_true_size

 LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type,
                              const char *prog_name, const char *license,
                              const struct bpf_insn *insns, size_t insn_cnt,
-                             const struct bpf_prog_load_opts *opts);
+                             struct bpf_prog_load_opts *opts);

 /* Flags to direct loading requirements */
 #define MAPS_RELAX_COMPAT 0x01
@@ -117,11 +124,18 @@ struct bpf_btf_load_opts {
     char *log_buf;
     __u32 log_level;
     __u32 log_size;
+    /* output: actual total log contents size (including terminating zero).
+     * It could be both larger than original log_size (if log was
+     * truncated), or smaller (if log buffer wasn't filled completely).
+     * If kernel doesn't support this feature, log_size is left unchanged.
+     */
+    __u32 log_true_size;
     size_t :0;
 };
-#define bpf_btf_load_opts__last_field log_size
+#define bpf_btf_load_opts__last_field log_true_size

 LIBBPF_API int bpf_btf_load(const void *btf_data, size_t btf_size,
-                            const struct bpf_btf_load_opts *opts);
+                            struct bpf_btf_load_opts *opts);

 LIBBPF_API int bpf_map_update_elem(int fd, const void *key, const void *value,
                                    __u64 flags);
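The btf_log_true_size output works the same way as for programs. A hedged sketch (not from the patch set; raw_btf is the caller's BTF blob) of a one-call size probe using the bpf_btf_load() signature above:

#include <unistd.h>
#include <bpf/bpf.h>

/* returns the exact log buffer size a retry would need for this BTF blob */
static __u32 probe_btf_log_size(const void *raw_btf, size_t raw_btf_sz)
{
    LIBBPF_OPTS(bpf_btf_load_opts, opts, .log_level = 1);
    int fd = bpf_btf_load(raw_btf, raw_btf_sz, &opts);

    if (fd >= 0)
        close(fd);
    return opts.log_true_size; /* 0 on kernels without this feature */
}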
tools/testing/selftests/bpf/prog_tests/log_fixup.c
@@ -24,6 +24,7 @@ static void bad_core_relo(size_t log_buf_size, enum trunc_type trunc_type)
     bpf_program__set_autoload(skel->progs.bad_relo, true);
     memset(log_buf, 0, sizeof(log_buf));
     bpf_program__set_log_buf(skel->progs.bad_relo, log_buf, log_buf_size ?: sizeof(log_buf));
+    bpf_program__set_log_level(skel->progs.bad_relo, 1 | 8); /* BPF_LOG_FIXED to force truncation */

     err = test_log_fixup__load(skel);
     if (!ASSERT_ERR(err, "load_fail"))
tools/testing/selftests/bpf/prog_tests/verifier_log.c (new file, 450 lines)
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+#include "test_log_buf.skel.h"
+
+
+static bool check_prog_load(int prog_fd, bool expect_err, const char *tag)
+{
+    if (expect_err) {
+        if (!ASSERT_LT(prog_fd, 0, tag)) {
+            close(prog_fd);
+            return false;
+        }
+    } else /* !expect_err */ {
+        if (!ASSERT_GT(prog_fd, 0, tag))
+            return false;
+    }
+    if (prog_fd >= 0)
+        close(prog_fd);
+    return true;
+}
+
+static struct {
+    /* strategically placed before others to avoid accidental modification by kernel */
+    char filler[1024];
+    char buf[1024];
+    /* strategically placed after buf[] to catch more accidental corruptions */
+    char reference[1024];
+} logs;
+static const struct bpf_insn *insns;
+static size_t insn_cnt;
+
+static int load_prog(struct bpf_prog_load_opts *opts, bool expect_load_error)
+{
+    int prog_fd;
+
+    prog_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, "log_prog",
+                            "GPL", insns, insn_cnt, opts);
+    check_prog_load(prog_fd, expect_load_error, "prog_load");
+
+    return prog_fd;
+}
+
+static void verif_log_subtest(const char *name, bool expect_load_error, int log_level)
+{
+    LIBBPF_OPTS(bpf_prog_load_opts, opts);
+    char *exp_log, prog_name[16], op_name[32];
+    struct test_log_buf *skel;
+    struct bpf_program *prog;
+    size_t fixed_log_sz;
+    __u32 log_true_sz_fixed, log_true_sz_rolling;
+    int i, mode, err, prog_fd, res;
+
+    skel = test_log_buf__open();
+    if (!ASSERT_OK_PTR(skel, "skel_open"))
+        return;
+
+    bpf_object__for_each_program(prog, skel->obj) {
+        if (strcmp(bpf_program__name(prog), name) == 0)
+            bpf_program__set_autoload(prog, true);
+        else
+            bpf_program__set_autoload(prog, false);
+    }
+
+    err = test_log_buf__load(skel);
+    if (!expect_load_error && !ASSERT_OK(err, "unexpected_load_failure"))
+        goto cleanup;
+    if (expect_load_error && !ASSERT_ERR(err, "unexpected_load_success"))
+        goto cleanup;
+
+    insns = bpf_program__insns(skel->progs.good_prog);
+    insn_cnt = bpf_program__insn_cnt(skel->progs.good_prog);
+
+    opts.log_buf = logs.reference;
+    opts.log_size = sizeof(logs.reference);
+    opts.log_level = log_level | 8 /* BPF_LOG_FIXED */;
+    load_prog(&opts, expect_load_error);
+
+    fixed_log_sz = strlen(logs.reference) + 1;
+    if (!ASSERT_GT(fixed_log_sz, 50, "fixed_log_sz"))
+        goto cleanup;
+    memset(logs.reference + fixed_log_sz, 0, sizeof(logs.reference) - fixed_log_sz);
+
+    /* validate BPF_LOG_FIXED works as verifier log used to work, that is:
+     * we get -ENOSPC and beginning of the full verifier log. This only
+     * works for log_level 2 and log_level 1 + failed program. For log
+     * level 2 we don't reset log at all. For log_level 1 + failed program
+     * we don't get to verification stats output. With log level 1
+     * for successful program final result will be just verifier stats.
+     * But if provided too short log buf, kernel will NULL-out log->ubuf
+     * and will stop emitting further log. This means we'll never see
+     * predictable verifier stats.
+     * Long story short, we do the following -ENOSPC test only for
+     * predictable combinations.
+     */
+    if (log_level >= 2 || expect_load_error) {
+        opts.log_buf = logs.buf;
+        opts.log_level = log_level | 8; /* fixed-length log */
+        opts.log_size = 25;
+
+        prog_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, "log_fixed25",
+                                "GPL", insns, insn_cnt, &opts);
+        if (!ASSERT_EQ(prog_fd, -ENOSPC, "unexpected_log_fixed_prog_load_result")) {
+            if (prog_fd >= 0)
+                close(prog_fd);
+            goto cleanup;
+        }
+        if (!ASSERT_EQ(strlen(logs.buf), 24, "log_fixed_25"))
+            goto cleanup;
+        if (!ASSERT_STRNEQ(logs.buf, logs.reference, 24, op_name))
+            goto cleanup;
+    }
+
+    /* validate rolling verifier log logic: try all variations of log buf
+     * length to force various truncation scenarios
+     */
+    opts.log_buf = logs.buf;
+
+    /* rotating mode, then fixed mode */
+    for (mode = 1; mode >= 0; mode--) {
+        /* prefill logs.buf with 'A's to detect any write beyond allowed length */
+        memset(logs.filler, 'A', sizeof(logs.filler));
+        logs.filler[sizeof(logs.filler) - 1] = '\0';
+        memset(logs.buf, 'A', sizeof(logs.buf));
+        logs.buf[sizeof(logs.buf) - 1] = '\0';
+
+        for (i = 1; i < fixed_log_sz; i++) {
+            opts.log_size = i;
+            opts.log_level = log_level | (mode ? 0 : 8 /* BPF_LOG_FIXED */);
+
+            snprintf(prog_name, sizeof(prog_name),
+                     "log_%s_%d", mode ? "roll" : "fixed", i);
+            prog_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, prog_name,
+                                    "GPL", insns, insn_cnt, &opts);
+
+            snprintf(op_name, sizeof(op_name),
+                     "log_%s_prog_load_%d", mode ? "roll" : "fixed", i);
+            if (!ASSERT_EQ(prog_fd, -ENOSPC, op_name)) {
+                if (prog_fd >= 0)
+                    close(prog_fd);
+                goto cleanup;
+            }
+
+            snprintf(op_name, sizeof(op_name),
+                     "log_%s_strlen_%d", mode ? "roll" : "fixed", i);
+            ASSERT_EQ(strlen(logs.buf), i - 1, op_name);
+
+            if (mode)
+                exp_log = logs.reference + fixed_log_sz - i;
+            else
+                exp_log = logs.reference;
+
+            snprintf(op_name, sizeof(op_name),
+                     "log_%s_contents_%d", mode ? "roll" : "fixed", i);
+            if (!ASSERT_STRNEQ(logs.buf, exp_log, i - 1, op_name)) {
+                printf("CMP:%d\nS1:'%s'\nS2:'%s'\n",
+                       strncmp(logs.buf, exp_log, i - 1),
+                       logs.buf, exp_log);
+                goto cleanup;
+            }
+
+            /* check that unused portions of logs.buf are not overwritten */
+            snprintf(op_name, sizeof(op_name),
+                     "log_%s_unused_%d", mode ? "roll" : "fixed", i);
+            if (!ASSERT_STREQ(logs.buf + i, logs.filler + i, op_name)) {
+                printf("CMP:%d\nS1:'%s'\nS2:'%s'\n",
+                       strcmp(logs.buf + i, logs.filler + i),
+                       logs.buf + i, logs.filler + i);
+                goto cleanup;
+            }
+        }
+    }
+
+    /* (FIXED) get actual log size */
+    opts.log_buf = logs.buf;
+    opts.log_level = log_level | 8; /* BPF_LOG_FIXED */
+    opts.log_size = sizeof(logs.buf);
+    opts.log_true_size = 0;
+    res = load_prog(&opts, expect_load_error);
+    ASSERT_NEQ(res, -ENOSPC, "prog_load_res_fixed");
+
+    log_true_sz_fixed = opts.log_true_size;
+    ASSERT_GT(log_true_sz_fixed, 0, "log_true_sz_fixed");
+
+    /* (FIXED, NULL) get actual log size */
+    opts.log_buf = NULL;
+    opts.log_level = log_level | 8; /* BPF_LOG_FIXED */
+    opts.log_size = 0;
+    opts.log_true_size = 0;
+    res = load_prog(&opts, expect_load_error);
+    ASSERT_NEQ(res, -ENOSPC, "prog_load_res_fixed_null");
+    ASSERT_EQ(opts.log_true_size, log_true_sz_fixed, "log_sz_fixed_null_eq");
+
+    /* (ROLLING) get actual log size */
+    opts.log_buf = logs.buf;
+    opts.log_level = log_level;
+    opts.log_size = sizeof(logs.buf);
+    opts.log_true_size = 0;
+    res = load_prog(&opts, expect_load_error);
+    ASSERT_NEQ(res, -ENOSPC, "prog_load_res_rolling");
+
+    log_true_sz_rolling = opts.log_true_size;
+    ASSERT_EQ(log_true_sz_rolling, log_true_sz_fixed, "log_true_sz_eq");
+
+    /* (ROLLING, NULL) get actual log size */
+    opts.log_buf = NULL;
+    opts.log_level = log_level;
+    opts.log_size = 0;
+    opts.log_true_size = 0;
+    res = load_prog(&opts, expect_load_error);
+    ASSERT_NEQ(res, -ENOSPC, "prog_load_res_rolling_null");
+    ASSERT_EQ(opts.log_true_size, log_true_sz_rolling, "log_true_sz_null_eq");
+
+    /* (FIXED) expect -ENOSPC for one byte short log */
+    opts.log_buf = logs.buf;
+    opts.log_level = log_level | 8; /* BPF_LOG_FIXED */
+    opts.log_size = log_true_sz_fixed - 1;
+    opts.log_true_size = 0;
+    res = load_prog(&opts, true /* should fail */);
+    ASSERT_EQ(res, -ENOSPC, "prog_load_res_too_short_fixed");
+
+    /* (FIXED) expect *not* -ENOSPC with exact log_true_size buffer */
+    opts.log_buf = logs.buf;
+    opts.log_level = log_level | 8; /* BPF_LOG_FIXED */
+    opts.log_size = log_true_sz_fixed;
+    opts.log_true_size = 0;
+    res = load_prog(&opts, expect_load_error);
+    ASSERT_NEQ(res, -ENOSPC, "prog_load_res_just_right_fixed");
+
+    /* (ROLLING) expect -ENOSPC for one byte short log */
+    opts.log_buf = logs.buf;
+    opts.log_level = log_level;
+    opts.log_size = log_true_sz_rolling - 1;
+    res = load_prog(&opts, true /* should fail */);
+    ASSERT_EQ(res, -ENOSPC, "prog_load_res_too_short_rolling");
+
+    /* (ROLLING) expect *not* -ENOSPC with exact log_true_size buffer */
+    opts.log_buf = logs.buf;
+    opts.log_level = log_level;
+    opts.log_size = log_true_sz_rolling;
+    opts.log_true_size = 0;
+    res = load_prog(&opts, expect_load_error);
+    ASSERT_NEQ(res, -ENOSPC, "prog_load_res_just_right_rolling");
+
+cleanup:
+    test_log_buf__destroy(skel);
+}
+
+static const void *btf_data;
+static u32 btf_data_sz;
+
+static int load_btf(struct bpf_btf_load_opts *opts, bool expect_err)
+{
+    int fd;
+
+    fd = bpf_btf_load(btf_data, btf_data_sz, opts);
+    if (fd >= 0)
+        close(fd);
+    if (expect_err)
+        ASSERT_LT(fd, 0, "btf_load_failure");
+    else /* !expect_err */
+        ASSERT_GT(fd, 0, "btf_load_success");
+    return fd;
+}
+
+static void verif_btf_log_subtest(bool bad_btf)
+{
+    LIBBPF_OPTS(bpf_btf_load_opts, opts);
+    struct btf *btf;
+    struct btf_type *t;
+    char *exp_log, op_name[32];
+    size_t fixed_log_sz;
+    __u32 log_true_sz_fixed, log_true_sz_rolling;
+    int i, res;
+
+    /* prepare simple BTF contents */
+    btf = btf__new_empty();
+    if (!ASSERT_OK_PTR(btf, "btf_new_empty"))
+        return;
+    res = btf__add_int(btf, "whatever", 4, 0);
+    if (!ASSERT_GT(res, 0, "btf_add_int_id"))
+        goto cleanup;
+    if (bad_btf) {
+        /* btf__add_int() doesn't allow bad value of size, so we'll just
+         * force-cast btf_type pointer and manually override size to invalid
+         * 3 if we need to simulate failure
+         */
+        t = (void *)btf__type_by_id(btf, res);
+        if (!ASSERT_OK_PTR(t, "int_btf_type"))
+            goto cleanup;
+        t->size = 3;
+    }
+
+    btf_data = btf__raw_data(btf, &btf_data_sz);
+    if (!ASSERT_OK_PTR(btf_data, "btf_data"))
+        goto cleanup;
+
+    load_btf(&opts, bad_btf);
+
+    opts.log_buf = logs.reference;
+    opts.log_size = sizeof(logs.reference);
+    opts.log_level = 1 | 8 /* BPF_LOG_FIXED */;
+    load_btf(&opts, bad_btf);
+
+    fixed_log_sz = strlen(logs.reference) + 1;
+    if (!ASSERT_GT(fixed_log_sz, 50, "fixed_log_sz"))
+        goto cleanup;
+    memset(logs.reference + fixed_log_sz, 0, sizeof(logs.reference) - fixed_log_sz);
+
+    /* validate BPF_LOG_FIXED truncation works as verifier log used to work */
+    opts.log_buf = logs.buf;
+    opts.log_level = 1 | 8; /* fixed-length log */
+    opts.log_size = 25;
+    res = load_btf(&opts, true);
+    ASSERT_EQ(res, -ENOSPC, "half_log_fd");
+    ASSERT_EQ(strlen(logs.buf), 24, "log_fixed_25");
+    ASSERT_STRNEQ(logs.buf, logs.reference, 24, op_name);
+
+    /* validate rolling verifier log logic: try all variations of log buf
+     * length to force various truncation scenarios
+     */
+    opts.log_buf = logs.buf;
+    opts.log_level = 1; /* rolling log */
+
+    /* prefill logs.buf with 'A's to detect any write beyond allowed length */
+    memset(logs.filler, 'A', sizeof(logs.filler));
+    logs.filler[sizeof(logs.filler) - 1] = '\0';
+    memset(logs.buf, 'A', sizeof(logs.buf));
+    logs.buf[sizeof(logs.buf) - 1] = '\0';
+
+    for (i = 1; i < fixed_log_sz; i++) {
+        opts.log_size = i;
+
+        snprintf(op_name, sizeof(op_name), "log_roll_btf_load_%d", i);
+        res = load_btf(&opts, true);
+        if (!ASSERT_EQ(res, -ENOSPC, op_name))
+            goto cleanup;
+
+        exp_log = logs.reference + fixed_log_sz - i;
+        snprintf(op_name, sizeof(op_name), "log_roll_contents_%d", i);
+        if (!ASSERT_STREQ(logs.buf, exp_log, op_name)) {
+            printf("CMP:%d\nS1:'%s'\nS2:'%s'\n",
+                   strcmp(logs.buf, exp_log),
+                   logs.buf, exp_log);
+            goto cleanup;
+        }
+
+        /* check that unused portions of logs.buf are not overwritten */
+        snprintf(op_name, sizeof(op_name), "log_roll_unused_tail_%d", i);
+        if (!ASSERT_STREQ(logs.buf + i, logs.filler + i, op_name)) {
+            printf("CMP:%d\nS1:'%s'\nS2:'%s'\n",
+                   strcmp(logs.buf + i, logs.filler + i),
+                   logs.buf + i, logs.filler + i);
+            goto cleanup;
+        }
+    }
+
+    /* (FIXED) get actual log size */
+    opts.log_buf = logs.buf;
+    opts.log_level = 1 | 8; /* BPF_LOG_FIXED */
+    opts.log_size = sizeof(logs.buf);
+    opts.log_true_size = 0;
+    res = load_btf(&opts, bad_btf);
+    ASSERT_NEQ(res, -ENOSPC, "btf_load_res_fixed");
+
+    log_true_sz_fixed = opts.log_true_size;
+    ASSERT_GT(log_true_sz_fixed, 0, "log_true_sz_fixed");
+
+    /* (FIXED, NULL) get actual log size */
+    opts.log_buf = NULL;
+    opts.log_level = 1 | 8; /* BPF_LOG_FIXED */
+    opts.log_size = 0;
+    opts.log_true_size = 0;
+    res = load_btf(&opts, bad_btf);
+    ASSERT_NEQ(res, -ENOSPC, "btf_load_res_fixed_null");
+    ASSERT_EQ(opts.log_true_size, log_true_sz_fixed, "log_sz_fixed_null_eq");
+
+    /* (ROLLING) get actual log size */
+    opts.log_buf = logs.buf;
+    opts.log_level = 1;
+    opts.log_size = sizeof(logs.buf);
+    opts.log_true_size = 0;
+    res = load_btf(&opts, bad_btf);
+    ASSERT_NEQ(res, -ENOSPC, "btf_load_res_rolling");
+
+    log_true_sz_rolling = opts.log_true_size;
+    ASSERT_EQ(log_true_sz_rolling, log_true_sz_fixed, "log_true_sz_eq");
+
+    /* (ROLLING, NULL) get actual log size */
+    opts.log_buf = NULL;
+    opts.log_level = 1;
+    opts.log_size = 0;
+    opts.log_true_size = 0;
+    res = load_btf(&opts, bad_btf);
+    ASSERT_NEQ(res, -ENOSPC, "btf_load_res_rolling_null");
+    ASSERT_EQ(opts.log_true_size, log_true_sz_rolling, "log_true_sz_null_eq");
+
+    /* (FIXED) expect -ENOSPC for one byte short log */
+    opts.log_buf = logs.buf;
+    opts.log_level = 1 | 8; /* BPF_LOG_FIXED */
+    opts.log_size = log_true_sz_fixed - 1;
+    opts.log_true_size = 0;
+    res = load_btf(&opts, true);
+    ASSERT_EQ(res, -ENOSPC, "btf_load_res_too_short_fixed");
+
+    /* (FIXED) expect *not* -ENOSPC with exact log_true_size buffer */
+    opts.log_buf = logs.buf;
+    opts.log_level = 1 | 8; /* BPF_LOG_FIXED */
+    opts.log_size = log_true_sz_fixed;
+    opts.log_true_size = 0;
+    res = load_btf(&opts, bad_btf);
+    ASSERT_NEQ(res, -ENOSPC, "btf_load_res_just_right_fixed");
+
+    /* (ROLLING) expect -ENOSPC for one byte short log */
+    opts.log_buf = logs.buf;
+    opts.log_level = 1;
+    opts.log_size = log_true_sz_rolling - 1;
+    res = load_btf(&opts, true);
+    ASSERT_EQ(res, -ENOSPC, "btf_load_res_too_short_rolling");
+
+    /* (ROLLING) expect *not* -ENOSPC with exact log_true_size buffer */
+    opts.log_buf = logs.buf;
+    opts.log_level = 1;
+    opts.log_size = log_true_sz_rolling;
+    opts.log_true_size = 0;
+    res = load_btf(&opts, bad_btf);
+    ASSERT_NEQ(res, -ENOSPC, "btf_load_res_just_right_rolling");
+
+cleanup:
+    btf__free(btf);
+}
+
+void test_verifier_log(void)
+{
+    if (test__start_subtest("good_prog-level1"))
+        verif_log_subtest("good_prog", false, 1);
+    if (test__start_subtest("good_prog-level2"))
+        verif_log_subtest("good_prog", false, 2);
+    if (test__start_subtest("bad_prog-level1"))
+        verif_log_subtest("bad_prog", true, 1);
+    if (test__start_subtest("bad_prog-level2"))
+        verif_log_subtest("bad_prog", true, 2);
+    if (test__start_subtest("bad_btf"))
+        verif_btf_log_subtest(true /* bad btf */);
+    if (test__start_subtest("good_btf"))
+        verif_btf_log_subtest(false /* !bad btf */);
+}
tools/testing/selftests/bpf/veristat.c
@@ -141,12 +141,15 @@ static struct env {
     bool verbose;
     bool debug;
    bool quiet;
-    int log_level;
     enum resfmt out_fmt;
     bool show_version;
     bool comparison_mode;
     bool replay_mode;

+    int log_level;
+    int log_size;
+    bool log_fixed;
+
     struct verif_stats *prog_stats;
     int prog_stat_cnt;

@@ -193,12 +196,19 @@ const char argp_program_doc[] =
 "   OR: veristat -C <baseline.csv> <comparison.csv>\n"
 "   OR: veristat -R <results.csv>\n";

+enum {
+    OPT_LOG_FIXED = 1000,
+    OPT_LOG_SIZE = 1001,
+};
+
 static const struct argp_option opts[] = {
     { NULL, 'h', NULL, OPTION_HIDDEN, "Show the full help" },
     { "version", 'V', NULL, 0, "Print version" },
     { "verbose", 'v', NULL, 0, "Verbose mode" },
-    { "log-level", 'l', "LEVEL", 0, "Verifier log level (default 0 for normal mode, 1 for verbose mode)" },
     { "debug", 'd', NULL, 0, "Debug mode (turns on libbpf debug logging)" },
+    { "log-level", 'l', "LEVEL", 0, "Verifier log level (default 0 for normal mode, 1 for verbose mode)" },
+    { "log-fixed", OPT_LOG_FIXED, NULL, 0, "Disable verifier log rotation" },
+    { "log-size", OPT_LOG_SIZE, "BYTES", 0, "Customize verifier log size (default to 16MB)" },
     { "quiet", 'q', NULL, 0, "Quiet mode" },
     { "emit", 'e', "SPEC", 0, "Specify stats to be emitted" },
     { "sort", 's', "SPEC", 0, "Specify sort order" },
@@ -263,6 +273,17 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
             argp_usage(state);
         }
         break;
+    case OPT_LOG_FIXED:
+        env.log_fixed = true;
+        break;
+    case OPT_LOG_SIZE:
+        errno = 0;
+        env.log_size = strtol(arg, NULL, 10);
+        if (errno) {
+            fprintf(stderr, "invalid log size: %s\n", arg);
+            argp_usage(state);
+        }
+        break;
     case 'C':
         env.comparison_mode = true;
         break;
@@ -929,8 +950,8 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf_program *prog)
 {
     const char *prog_name = bpf_program__name(prog);
     const char *base_filename = basename(filename);
-    size_t buf_sz = sizeof(verif_log_buf);
-    char *buf = verif_log_buf;
+    char *buf;
+    int buf_sz, log_level;
     struct verif_stats *stats;
     int err = 0;
     void *tmp;
@@ -948,18 +969,23 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf_program *prog)
     memset(stats, 0, sizeof(*stats));

     if (env.verbose) {
-        buf_sz = 16 * 1024 * 1024;
+        buf_sz = env.log_size ? env.log_size : 16 * 1024 * 1024;
         buf = malloc(buf_sz);
         if (!buf)
             return -ENOMEM;
-        bpf_program__set_log_buf(prog, buf, buf_sz);
-        bpf_program__set_log_level(prog, env.log_level | 4); /* stats + log */
+        /* ensure we always request stats */
+        log_level = env.log_level | 4 | (env.log_fixed ? 8 : 0);
     } else {
-        bpf_program__set_log_buf(prog, buf, buf_sz);
-        bpf_program__set_log_level(prog, 4); /* only verifier stats */
+        buf = verif_log_buf;
+        buf_sz = sizeof(verif_log_buf);
+        /* request only verifier stats */
+        log_level = 4 | (env.log_fixed ? 8 : 0);
     }
     verif_log_buf[0] = '\0';

+    bpf_program__set_log_buf(prog, buf, buf_sz);
+    bpf_program__set_log_level(prog, log_level);
+
     /* increase chances of successful BPF object loading */
     fixup_obj(obj, prog, base_filename);

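With the options above in place, a hypothetical veristat invocation (object file path is illustrative) that replays the old fixed-position log behavior with a 1 MB buffer would look like:

  $ veristat prog.bpf.o -v -l2 --log-size=1048576 --log-fixed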