mirror of https://github.com/torvalds/linux.git (synced 2024-11-23 12:42:02 +00:00)
Merge branch 'Add precision propagation for subprogs and callbacks'
Andrii Nakryiko says:

====================

As more and more real-world BPF programs become more complex and increasingly use subprograms (both static and global), scalar precision tracking and its (previously weak) support for BPF subprograms (and callbacks as a special case of that) is becoming more and more of an issue and limitation. Couple that with increasing reliance on state equivalence (BPF open-coded iterators have a hard requirement for state equivalence to converge and successfully validate loops), and it becomes pretty critical to address this limitation and make precision tracking universally supported for BPF programs of any complexity and composition.

This patch set teaches the BPF verifier to support SCALAR precision backpropagation across multiple frames (for subprogram calls and callback simulations) and addresses most practical situations (SCALAR stack loads/stores using registers other than r10 being the last remaining limitation, though thankfully rarely used in practice).

The main logic is explained in detail in patch #8. The rest are preliminary preparations, refactorings, clean-ups, and fixes. See respective patches for details. Patch #8 also has a veristat comparison of results for selftests, Cilium, and some of Meta's production BPF programs before and after these changes.

v2->v3:
- drop bitcnt and ifs from bt_xxx() helpers (Alexei);

v1->v2:
- addressed review feedback from Alexei, adjusted commit messages, comments, added verbose(), WARN_ONCE(), etc;
- re-ran all the tests and veristat on selftests, cilium, and meta-internal code: no new changes and no kernel warnings.

====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in: commit fbc0b02530
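To make the limitation concrete before diving into the diff, here is a minimal sketch (hypothetical, not taken from the patch set) of the pattern this series makes verifiable without the all-scalars-precise fallback: a scalar flows through a static subprog and is then used as an array index, so its precision mark must be backpropagated through the call.

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    int vals[] SEC(".data.vals") = {1, 2, 3, 4};

    /* hypothetical static subprog: precision of the result must flow back into 'x' */
    static __noinline __u64 identity(__u64 x)
    {
        return x;
    }

    SEC("raw_tp")
    int use_subprog_result(void *ctx)
    {
        __u64 i = identity(3);

        if (i >= 4) /* clamp keeps the index provably in bounds */
            return 0;
        /* 'i' must be a precise scalar here; previously the verifier could
         * not backtrack through the subprog call and conservatively marked
         * all scalars in the state as precise
         */
        return vals[i];
    }

    char _license[] SEC("license") = "GPL";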
@@ -18,8 +18,11 @@
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ (1 << 29)
/* size of type_str_buf in bpf_verifier. */
#define TYPE_STR_BUF_LEN 128
/* size of tmp_str_buf in bpf_verifier.
 * we need at least 306 bytes to fit full stack mask representation
 * (in the "-8,-16,...,-512" form)
 */
#define TMP_STR_BUF_LEN 320

/* Liveness marks, used for registers and spilled-regs (in stack slots).
 * Read marks propagate upwards until they find a write mark; they record that
@@ -238,6 +241,10 @@ enum bpf_stack_slot_type {

#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */

#define BPF_REGMASK_ARGS ((1 << BPF_REG_1) | (1 << BPF_REG_2) | \
                          (1 << BPF_REG_3) | (1 << BPF_REG_4) | \
                          (1 << BPF_REG_5))

#define BPF_DYNPTR_SIZE sizeof(struct bpf_dynptr_kern)
#define BPF_DYNPTR_NR_SLOTS (BPF_DYNPTR_SIZE / BPF_REG_SIZE)

@@ -541,6 +548,15 @@ struct bpf_subprog_info {
    bool is_async_cb;
};

struct bpf_verifier_env;

struct backtrack_state {
    struct bpf_verifier_env *env;
    u32 frame;
    u32 reg_masks[MAX_CALL_FRAMES];
    u64 stack_masks[MAX_CALL_FRAMES];
};

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
@@ -578,6 +594,7 @@ struct bpf_verifier_env {
        int *insn_stack;
        int cur_stack;
    } cfg;
    struct backtrack_state bt;
    u32 pass_cnt; /* number of times do_check() was called */
    u32 subprog_cnt;
    /* number of instructions analyzed by the verifier */
@@ -606,8 +623,10 @@ struct bpf_verifier_env {
    /* Same as scratched_regs but for stack slots */
    u64 scratched_stack_slots;
    u64 prev_log_pos, prev_insn_print_pos;
    /* buffer used in reg_type_str() to generate reg_type string */
    char type_str_buf[TYPE_STR_BUF_LEN];
    /* buffer used to generate temporary string representations,
     * e.g., in reg_type_str() to generate reg_type string
     */
    char tmp_str_buf[TMP_STR_BUF_LEN];
};

__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
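The new backtrack_state is a plain pair of per-frame bitmaps. A standalone userspace sketch (assumption: nothing kernel-specific, just the bit encoding) of how it is meant to be read: bit i of reg_masks[f] means register ri in frame f still needs a precision mark, and bit j of stack_masks[f] means the 8-byte stack slot at fp-(j+1)*8 in frame f does.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_CALL_FRAMES 8

    struct backtrack_state {
        uint32_t frame;
        uint32_t reg_masks[MAX_CALL_FRAMES];
        uint64_t stack_masks[MAX_CALL_FRAMES];
    };

    int main(void)
    {
        struct backtrack_state bt = { .frame = 1 };

        bt.reg_masks[1] |= 1u << 6;     /* r6 in frame 1 needs precision */
        bt.stack_masks[0] |= 1ull << 0; /* fp-8 in frame 0 needs precision */

        /* exiting the callee during backtracking just moves the frame index
         * down; frame 0's pending marks are still there waiting
         */
        bt.frame--;
        printf("frame %u: regs=%x stack=%llx\n", bt.frame,
               (unsigned)bt.reg_masks[bt.frame],
               (unsigned long long)bt.stack_masks[bt.frame]);
        return 0;
    }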
@@ -240,6 +240,12 @@ static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
        (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}

static bool bpf_helper_call(const struct bpf_insn *insn)
{
    return insn->code == (BPF_JMP | BPF_CALL) &&
           insn->src_reg == 0;
}

static bool bpf_pseudo_call(const struct bpf_insn *insn)
{
    return insn->code == (BPF_JMP | BPF_CALL) &&
@@ -469,6 +475,13 @@ static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
    return rec;
}

static bool subprog_is_global(const struct bpf_verifier_env *env, int subprog)
{
    struct bpf_func_info_aux *aux = env->prog->aux->func_info_aux;

    return aux && aux[subprog].linkage == BTF_FUNC_GLOBAL;
}

static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
{
    return btf_record_has_field(reg_btf_record(reg), BPF_SPIN_LOCK);
@@ -516,6 +529,8 @@ static bool is_dynptr_ref_function(enum bpf_func_id func_id)
    return func_id == BPF_FUNC_dynptr_data;
}

static bool is_callback_calling_kfunc(u32 btf_id);

static bool is_callback_calling_function(enum bpf_func_id func_id)
{
    return func_id == BPF_FUNC_for_each_map_elem ||
@@ -525,6 +540,11 @@ static bool is_callback_calling_function(enum bpf_func_id func_id)
           func_id == BPF_FUNC_user_ringbuf_drain;
}

static bool is_async_callback_calling_function(enum bpf_func_id func_id)
{
    return func_id == BPF_FUNC_timer_set_callback;
}

static bool is_storage_get_function(enum bpf_func_id func_id)
{
    return func_id == BPF_FUNC_sk_storage_get ||
@@ -605,9 +625,9 @@ static const char *reg_type_str(struct bpf_verifier_env *env,
        type & PTR_TRUSTED ? "trusted_" : ""
    );

    snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s",
    snprintf(env->tmp_str_buf, TMP_STR_BUF_LEN, "%s%s%s",
             prefix, str[base_type(type)], postfix);
    return env->type_str_buf;
    return env->tmp_str_buf;
}

static char slot_type_char[] = {
@@ -1296,6 +1316,12 @@ static bool is_spilled_reg(const struct bpf_stack_state *stack)
    return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
}

static bool is_spilled_scalar_reg(const struct bpf_stack_state *stack)
{
    return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL &&
           stack->spilled_ptr.type == SCALAR_VALUE;
}

static void scrub_spilled_slot(u8 *stype)
{
    if (*stype != STACK_INVALID)
@@ -3186,12 +3212,172 @@ static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn)
    return btf_name_by_offset(desc_btf, func->name_off);
}

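backtrack_insn() further down has to treat three flavors of BPF_CALL differently: helper calls, calls to BPF subprogs (pseudo calls), and kfunc calls. As a reference sketch (userspace, simplified; the constants are the real UAPI ones from <linux/bpf.h>), the discrimination is entirely in src_reg:

    #include <linux/bpf.h>
    #include <stdbool.h>

    static bool is_helper_call(const struct bpf_insn *insn)
    {
        return insn->code == (BPF_JMP | BPF_CALL) && insn->src_reg == 0;
    }

    static bool is_subprog_call(const struct bpf_insn *insn)
    {
        return insn->code == (BPF_JMP | BPF_CALL) &&
               insn->src_reg == BPF_PSEUDO_CALL;
    }

    static bool is_kfunc_call(const struct bpf_insn *insn)
    {
        return insn->code == (BPF_JMP | BPF_CALL) &&
               insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
    }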
static inline void bt_init(struct backtrack_state *bt, u32 frame)
{
    bt->frame = frame;
}

static inline void bt_reset(struct backtrack_state *bt)
{
    struct bpf_verifier_env *env = bt->env;

    memset(bt, 0, sizeof(*bt));
    bt->env = env;
}

static inline u32 bt_empty(struct backtrack_state *bt)
{
    u64 mask = 0;
    int i;

    for (i = 0; i <= bt->frame; i++)
        mask |= bt->reg_masks[i] | bt->stack_masks[i];

    return mask == 0;
}

static inline int bt_subprog_enter(struct backtrack_state *bt)
{
    if (bt->frame == MAX_CALL_FRAMES - 1) {
        verbose(bt->env, "BUG subprog enter from frame %d\n", bt->frame);
        WARN_ONCE(1, "verifier backtracking bug");
        return -EFAULT;
    }
    bt->frame++;
    return 0;
}

static inline int bt_subprog_exit(struct backtrack_state *bt)
{
    if (bt->frame == 0) {
        verbose(bt->env, "BUG subprog exit from frame 0\n");
        WARN_ONCE(1, "verifier backtracking bug");
        return -EFAULT;
    }
    bt->frame--;
    return 0;
}

static inline void bt_set_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg)
{
    bt->reg_masks[frame] |= 1 << reg;
}

static inline void bt_clear_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg)
{
    bt->reg_masks[frame] &= ~(1 << reg);
}

static inline void bt_set_reg(struct backtrack_state *bt, u32 reg)
{
    bt_set_frame_reg(bt, bt->frame, reg);
}

static inline void bt_clear_reg(struct backtrack_state *bt, u32 reg)
{
    bt_clear_frame_reg(bt, bt->frame, reg);
}

static inline void bt_set_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot)
{
    bt->stack_masks[frame] |= 1ull << slot;
}

static inline void bt_clear_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot)
{
    bt->stack_masks[frame] &= ~(1ull << slot);
}

static inline void bt_set_slot(struct backtrack_state *bt, u32 slot)
{
    bt_set_frame_slot(bt, bt->frame, slot);
}

static inline void bt_clear_slot(struct backtrack_state *bt, u32 slot)
{
    bt_clear_frame_slot(bt, bt->frame, slot);
}

static inline u32 bt_frame_reg_mask(struct backtrack_state *bt, u32 frame)
{
    return bt->reg_masks[frame];
}

static inline u32 bt_reg_mask(struct backtrack_state *bt)
{
    return bt->reg_masks[bt->frame];
}

static inline u64 bt_frame_stack_mask(struct backtrack_state *bt, u32 frame)
{
    return bt->stack_masks[frame];
}

static inline u64 bt_stack_mask(struct backtrack_state *bt)
{
    return bt->stack_masks[bt->frame];
}

static inline bool bt_is_reg_set(struct backtrack_state *bt, u32 reg)
{
    return bt->reg_masks[bt->frame] & (1 << reg);
}

static inline bool bt_is_slot_set(struct backtrack_state *bt, u32 slot)
{
    return bt->stack_masks[bt->frame] & (1ull << slot);
}

/* format registers bitmask, e.g., "r0,r2,r4" for 0x15 mask */
static void fmt_reg_mask(char *buf, ssize_t buf_sz, u32 reg_mask)
{
    DECLARE_BITMAP(mask, 64);
    bool first = true;
    int i, n;

    buf[0] = '\0';

    bitmap_from_u64(mask, reg_mask);
    for_each_set_bit(i, mask, 32) {
        n = snprintf(buf, buf_sz, "%sr%d", first ? "" : ",", i);
        first = false;
        buf += n;
        buf_sz -= n;
        if (buf_sz < 0)
            break;
    }
}

/* format stack slots bitmask, e.g., "-8,-24,-40" for 0x15 mask */
static void fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask)
{
    DECLARE_BITMAP(mask, 64);
    bool first = true;
    int i, n;

    buf[0] = '\0';

    bitmap_from_u64(mask, stack_mask);
    for_each_set_bit(i, mask, 64) {
        n = snprintf(buf, buf_sz, "%s%d", first ? "" : ",", -(i + 1) * 8);
        first = false;
        buf += n;
        buf_sz -= n;
        if (buf_sz < 0)
            break;
    }
}

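The 0x15 examples in the comments can be checked by hand: bits 0, 2 and 4 are set, which maps to r0,r2,r4 for registers, and to slots 0, 2 and 4, i.e. offsets -8,-24,-40, for the stack. A portable re-implementation (sketch; plain bit tests instead of the kernel's DECLARE_BITMAP/for_each_set_bit) that prints exactly those strings:

    #include <stdint.h>
    #include <stdio.h>

    static void fmt_reg_mask(char *buf, long buf_sz, uint32_t reg_mask)
    {
        int first = 1, n;

        buf[0] = '\0';
        for (int i = 0; i < 32; i++) {
            if (!(reg_mask & (1u << i)))
                continue;
            n = snprintf(buf, buf_sz, "%sr%d", first ? "" : ",", i);
            first = 0;
            buf += n;
            buf_sz -= n;
            if (buf_sz < 0)
                break;
        }
    }

    static void fmt_stack_mask(char *buf, long buf_sz, uint64_t stack_mask)
    {
        int first = 1, n;

        buf[0] = '\0';
        for (int i = 0; i < 64; i++) {
            if (!(stack_mask & (1ull << i)))
                continue;
            /* slot i covers bytes [fp - (i + 1) * 8, fp - i * 8) */
            n = snprintf(buf, buf_sz, "%s%d", first ? "" : ",", -(i + 1) * 8);
            first = 0;
            buf += n;
            buf_sz -= n;
            if (buf_sz < 0)
                break;
        }
    }

    int main(void)
    {
        char buf[320]; /* TMP_STR_BUF_LEN-sized, fits the full "-8,...,-512" form */

        fmt_reg_mask(buf, sizeof(buf), 0x15);
        printf("%s\n", buf); /* r0,r2,r4 */
        fmt_stack_mask(buf, sizeof(buf), 0x15);
        printf("%s\n", buf); /* -8,-24,-40 */
        return 0;
    }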
/* For given verifier state backtrack_insn() is called from the last insn to
 * the first insn. Its purpose is to compute a bitmask of registers and
 * stack slots that needs precision in the parent verifier state.
 *
 * @idx is an index of the instruction we are currently processing;
 * @subseq_idx is an index of the subsequent instruction that:
 * - *would be* executed next, if jump history is viewed in forward order;
 * - *was* processed previously during backtracking.
 */
static int backtrack_insn(struct bpf_verifier_env *env, int idx,
                          u32 *reg_mask, u64 *stack_mask)
static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
                          struct backtrack_state *bt)
{
    const struct bpf_insn_cbs cbs = {
        .cb_call = disasm_kfunc_name,
@@ -3202,20 +3388,24 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
    u8 class = BPF_CLASS(insn->code);
    u8 opcode = BPF_OP(insn->code);
    u8 mode = BPF_MODE(insn->code);
    u32 dreg = 1u << insn->dst_reg;
    u32 sreg = 1u << insn->src_reg;
    u32 spi;
    u32 dreg = insn->dst_reg;
    u32 sreg = insn->src_reg;
    u32 spi, i;

    if (insn->code == 0)
        return 0;
    if (env->log.level & BPF_LOG_LEVEL2) {
        verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
        fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_reg_mask(bt));
        verbose(env, "mark_precise: frame%d: regs=%s ",
                bt->frame, env->tmp_str_buf);
        fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_stack_mask(bt));
        verbose(env, "stack=%s before ", env->tmp_str_buf);
        verbose(env, "%d: ", idx);
        print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
    }

    if (class == BPF_ALU || class == BPF_ALU64) {
        if (!(*reg_mask & dreg))
        if (!bt_is_reg_set(bt, dreg))
            return 0;
        if (opcode == BPF_MOV) {
            if (BPF_SRC(insn->code) == BPF_X) {
@@ -3223,8 +3413,8 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
                 * dreg needs precision after this insn
                 * sreg needs precision before this insn
                 */
                *reg_mask &= ~dreg;
                *reg_mask |= sreg;
                bt_clear_reg(bt, dreg);
                bt_set_reg(bt, sreg);
            } else {
                /* dreg = K
                 * dreg needs precision after this insn.
@@ -3232,7 +3422,7 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
                 * as precise=true in this verifier state.
                 * No further markings in parent are necessary
                 */
                *reg_mask &= ~dreg;
                bt_clear_reg(bt, dreg);
            }
        } else {
            if (BPF_SRC(insn->code) == BPF_X) {
@@ -3240,15 +3430,15 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
                 * both dreg and sreg need precision
                 * before this insn
                 */
                *reg_mask |= sreg;
                bt_set_reg(bt, sreg);
            } /* else dreg += K
               * dreg still needs precision before this insn
               */
        }
    } else if (class == BPF_LDX) {
        if (!(*reg_mask & dreg))
        if (!bt_is_reg_set(bt, dreg))
            return 0;
        *reg_mask &= ~dreg;
        bt_clear_reg(bt, dreg);

        /* scalars can only be spilled into stack w/o losing precision.
         * Load from any other memory can be zero extended.
@@ -3269,9 +3459,9 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
            WARN_ONCE(1, "verifier backtracking bug");
            return -EFAULT;
        }
        *stack_mask |= 1ull << spi;
        bt_set_slot(bt, spi);
    } else if (class == BPF_STX || class == BPF_ST) {
        if (*reg_mask & dreg)
        if (bt_is_reg_set(bt, dreg))
            /* stx & st shouldn't be using _scalar_ dst_reg
             * to access memory. It means backtracking
             * encountered a case of pointer subtraction.
@@ -3286,20 +3476,92 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
            WARN_ONCE(1, "verifier backtracking bug");
            return -EFAULT;
        }
        if (!(*stack_mask & (1ull << spi)))
        if (!bt_is_slot_set(bt, spi))
            return 0;
        *stack_mask &= ~(1ull << spi);
        bt_clear_slot(bt, spi);
        if (class == BPF_STX)
            *reg_mask |= sreg;
            bt_set_reg(bt, sreg);
    } else if (class == BPF_JMP || class == BPF_JMP32) {
        if (opcode == BPF_CALL) {
            if (insn->src_reg == BPF_PSEUDO_CALL)
                return -ENOTSUPP;
            /* BPF helpers that invoke callback subprogs are
             * equivalent to BPF_PSEUDO_CALL above
            if (bpf_pseudo_call(insn)) {
                int subprog_insn_idx, subprog;

                subprog_insn_idx = idx + insn->imm + 1;
                subprog = find_subprog(env, subprog_insn_idx);
                if (subprog < 0)
                    return -EFAULT;

                if (subprog_is_global(env, subprog)) {
                    /* check that jump history doesn't have any
                     * extra instructions from subprog; the next
                     * instruction after call to global subprog
                     * should be literally next instruction in
                     * caller program
                     */
                    WARN_ONCE(idx + 1 != subseq_idx, "verifier backtracking bug");
                    /* r1-r5 are invalidated after subprog call,
                     * so for global func call it shouldn't be set
                     * anymore
                     */
                    if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
                        verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
                        WARN_ONCE(1, "verifier backtracking bug");
                        return -EFAULT;
                    }
                    /* global subprog always sets R0 */
                    bt_clear_reg(bt, BPF_REG_0);
                    return 0;
                } else {
                    /* static subprog call instruction, which
                     * means that we are exiting current subprog,
                     * so only r1-r5 could be still requested as
                     * precise, r0 and r6-r10 or any stack slot in
                     * the current frame should be zero by now
                     */
                    if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) {
                        verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
                        WARN_ONCE(1, "verifier backtracking bug");
                        return -EFAULT;
                    }
                    /* we don't track register spills perfectly,
                     * so fallback to force-precise instead of failing */
                    if (bt_stack_mask(bt) != 0)
                        return -ENOTSUPP;
                    /* propagate r1-r5 to the caller */
                    for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
                        if (bt_is_reg_set(bt, i)) {
                            bt_clear_reg(bt, i);
                            bt_set_frame_reg(bt, bt->frame - 1, i);
                        }
                    }
                    if (bt_subprog_exit(bt))
                        return -EFAULT;
                    return 0;
                }
            } else if ((bpf_helper_call(insn) &&
                        is_callback_calling_function(insn->imm) &&
                        !is_async_callback_calling_function(insn->imm)) ||
                       (bpf_pseudo_kfunc_call(insn) && is_callback_calling_kfunc(insn->imm))) {
                /* callback-calling helper or kfunc call, which means
                 * we are exiting from subprog, but unlike the subprog
                 * call handling above, we shouldn't propagate
                 * precision of r1-r5 (if any requested), as they are
                 * not actually arguments passed directly to callback
                 * subprogs
                 */
                if (insn->src_reg == 0 && is_callback_calling_function(insn->imm))
                if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) {
                    verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
                    WARN_ONCE(1, "verifier backtracking bug");
                    return -EFAULT;
                }
                if (bt_stack_mask(bt) != 0)
                    return -ENOTSUPP;
                /* clear r1-r5 in callback subprog's mask */
                for (i = BPF_REG_1; i <= BPF_REG_5; i++)
                    bt_clear_reg(bt, i);
                if (bt_subprog_exit(bt))
                    return -EFAULT;
                return 0;
            } else if (opcode == BPF_CALL) {
                /* kfunc with imm==0 is invalid and fixup_kfunc_call will
                 * catch this error later. Make backtracking conservative
                 * with ENOTSUPP.
@@ -3307,19 +3569,51 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
                if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0)
                    return -ENOTSUPP;
                /* regular helper call sets R0 */
                *reg_mask &= ~1;
                if (*reg_mask & 0x3f) {
                bt_clear_reg(bt, BPF_REG_0);
                if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
                    /* if backtracing was looking for registers R1-R5
                     * they should have been found already.
                     */
                    verbose(env, "BUG regs %x\n", *reg_mask);
                    verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
                    WARN_ONCE(1, "verifier backtracking bug");
                    return -EFAULT;
                }
        } else if (opcode == BPF_EXIT) {
            return -ENOTSUPP;
            bool r0_precise;

            if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
                /* if backtracing was looking for registers R1-R5
                 * they should have been found already.
                 */
                verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
                WARN_ONCE(1, "verifier backtracking bug");
                return -EFAULT;
            }

            /* BPF_EXIT in subprog or callback always returns
             * right after the call instruction, so by checking
             * whether the instruction at subseq_idx-1 is subprog
             * call or not we can distinguish actual exit from
             * *subprog* from exit from *callback*. In the former
             * case, we need to propagate r0 precision, if
             * necessary. In the latter we never do that.
             */
            r0_precise = subseq_idx - 1 >= 0 &&
                         bpf_pseudo_call(&env->prog->insnsi[subseq_idx - 1]) &&
                         bt_is_reg_set(bt, BPF_REG_0);

            bt_clear_reg(bt, BPF_REG_0);
            if (bt_subprog_enter(bt))
                return -EFAULT;

            if (r0_precise)
                bt_set_reg(bt, BPF_REG_0);
            /* r6-r9 and stack slots will stay set in caller frame
             * bitmasks until we return back from callee(s)
             */
            return 0;
        } else if (BPF_SRC(insn->code) == BPF_X) {
            if (!(*reg_mask & (dreg | sreg)))
            if (!bt_is_reg_set(bt, dreg) && !bt_is_reg_set(bt, sreg))
                return 0;
            /* dreg <cond> sreg
             * Both dreg and sreg need precision before
@@ -3327,7 +3621,8 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
             * before it would be equally necessary to
             * propagate it to dreg.
             */
            *reg_mask |= (sreg | dreg);
            bt_set_reg(bt, dreg);
            bt_set_reg(bt, sreg);
            /* else dreg <cond> K
             * Only dreg still needs precision before
             * this insn, so for the K-based conditional
@@ -3335,9 +3630,9 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
             */
        }
    } else if (class == BPF_LD) {
        if (!(*reg_mask & dreg))
        if (!bt_is_reg_set(bt, dreg))
            return 0;
        *reg_mask &= ~dreg;
        bt_clear_reg(bt, dreg);
        /* It's ld_imm64 or ld_abs or ld_ind.
         * For ld_imm64 no further tracking of precision
         * into parent is necessary
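A worked trace of the subseq_idx disambiguation, mirroring the subprog_result_precise selftest added at the end of this commit:

    /* Forward execution order:
     *   2: r6 = 3
     *   3: r1 = r6
     *   4: call pc+5        // static subprog
     *  10: r0 = r1          // subprog body
     *  11: exit
     *   5: r0 *= 4          // back in caller; forces r0 to be precise
     *
     * Backtracking walks this in reverse. When it reaches insn 11 (exit),
     * subseq_idx is 5, and insnsi[subseq_idx - 1] is the call at insn 4, so
     * this exit is a *subprog* exit: r0's precision mark is carried into the
     * callee frame (there, "r0 = r1" moves the mark to r1, and the call
     * instruction maps r1 back into the caller's frame).
     */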
@@ -3408,6 +3703,11 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env,
    struct bpf_reg_state *reg;
    int i, j;

    if (env->log.level & BPF_LOG_LEVEL2) {
        verbose(env, "mark_precise: frame%d: falling back to forcing all scalars precise\n",
                st->curframe);
    }

    /* big hammer: mark all scalars precise in this path.
     * pop_stack may still get !precise scalars.
     * We also skip current state and go straight to first parent state,
@@ -3419,17 +3719,25 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env,
        func = st->frame[i];
        for (j = 0; j < BPF_REG_FP; j++) {
            reg = &func->regs[j];
            if (reg->type != SCALAR_VALUE)
            if (reg->type != SCALAR_VALUE || reg->precise)
                continue;
            reg->precise = true;
            if (env->log.level & BPF_LOG_LEVEL2) {
                verbose(env, "force_precise: frame%d: forcing r%d to be precise\n",
                        i, j);
            }
        }
        for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
            if (!is_spilled_reg(&func->stack[j]))
                continue;
            reg = &func->stack[j].spilled_ptr;
            if (reg->type != SCALAR_VALUE)
            if (reg->type != SCALAR_VALUE || reg->precise)
                continue;
            reg->precise = true;
            if (env->log.level & BPF_LOG_LEVEL2) {
                verbose(env, "force_precise: frame%d: forcing fp%d to be precise\n",
                        i, -(j + 1) * 8);
            }
        }
    }
}
@@ -3547,62 +3855,48 @@ static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_
 * mark_all_scalars_imprecise() to hopefully get more permissive and generic
 * finalized states which help in short circuiting more future states.
 */
static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int regno,
                                  int spi)
static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
{
    struct backtrack_state *bt = &env->bt;
    struct bpf_verifier_state *st = env->cur_state;
    int first_idx = st->first_insn_idx;
    int last_idx = env->insn_idx;
    struct bpf_func_state *func;
    struct bpf_reg_state *reg;
    u32 reg_mask = regno >= 0 ? 1u << regno : 0;
    u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
    bool skip_first = true;
    bool new_marks = false;
    int i, err;
    int i, prev_i, fr, err;

    if (!env->bpf_capable)
        return 0;

    /* set frame number from which we are starting to backtrack */
    bt_init(bt, env->cur_state->curframe);

    /* Do sanity checks against current state of register and/or stack
     * slot, but don't set precise flag in current state, as precision
     * tracking in the current state is unnecessary.
     */
    func = st->frame[frame];
    func = st->frame[bt->frame];
    if (regno >= 0) {
        reg = &func->regs[regno];
        if (reg->type != SCALAR_VALUE) {
            WARN_ONCE(1, "backtracing misuse");
            return -EFAULT;
        }
        new_marks = true;
        bt_set_reg(bt, regno);
    }

    while (spi >= 0) {
        if (!is_spilled_reg(&func->stack[spi])) {
            stack_mask = 0;
            break;
        }
        reg = &func->stack[spi].spilled_ptr;
        if (reg->type != SCALAR_VALUE) {
            stack_mask = 0;
            break;
        }
        new_marks = true;
        break;
    }

    if (!new_marks)
        return 0;
    if (!reg_mask && !stack_mask)
    if (bt_empty(bt))
        return 0;

    for (;;) {
        DECLARE_BITMAP(mask, 64);
        u32 history = st->jmp_history_cnt;

        if (env->log.level & BPF_LOG_LEVEL2)
            verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
        if (env->log.level & BPF_LOG_LEVEL2) {
            verbose(env, "mark_precise: frame%d: last_idx %d first_idx %d\n",
                    bt->frame, last_idx, first_idx);
        }

        if (last_idx < 0) {
            /* we are at the entry into subprog, which
@@ -3613,12 +3907,13 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r
            if (st->curframe == 0 &&
                st->frame[0]->subprogno > 0 &&
                st->frame[0]->callsite == BPF_MAIN_FUNC &&
                stack_mask == 0 && (reg_mask & ~0x3e) == 0) {
                bitmap_from_u64(mask, reg_mask);
                bt_stack_mask(bt) == 0 &&
                (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) == 0) {
                bitmap_from_u64(mask, bt_reg_mask(bt));
                for_each_set_bit(i, mask, 32) {
                    reg = &st->frame[0]->regs[i];
                    if (reg->type != SCALAR_VALUE) {
                        reg_mask &= ~(1u << i);
                        bt_clear_reg(bt, i);
                        continue;
                    }
                    reg->precise = true;
@@ -3626,26 +3921,27 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r
                return 0;
            }

            verbose(env, "BUG backtracing func entry subprog %d reg_mask %x stack_mask %llx\n",
                    st->frame[0]->subprogno, reg_mask, stack_mask);
            verbose(env, "BUG backtracking func entry subprog %d reg_mask %x stack_mask %llx\n",
                    st->frame[0]->subprogno, bt_reg_mask(bt), bt_stack_mask(bt));
            WARN_ONCE(1, "verifier backtracking bug");
            return -EFAULT;
        }

        for (i = last_idx;;) {
        for (i = last_idx, prev_i = -1;;) {
            if (skip_first) {
                err = 0;
                skip_first = false;
            } else {
                err = backtrack_insn(env, i, &reg_mask, &stack_mask);
                err = backtrack_insn(env, i, prev_i, bt);
            }
            if (err == -ENOTSUPP) {
                mark_all_scalars_precise(env, st);
                mark_all_scalars_precise(env, env->cur_state);
                bt_reset(bt);
                return 0;
            } else if (err) {
                return err;
            }
            if (!reg_mask && !stack_mask)
            if (bt_empty(bt))
                /* Found assignment(s) into tracked register in this state.
                 * Since this state is already marked, just return.
                 * Nothing to be tracked further in the parent state.
@@ -3653,6 +3949,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r
                return 0;
            if (i == first_idx)
                break;
            prev_i = i;
            i = get_prev_insn_idx(st, i, &history);
            if (i >= env->prog->len) {
                /* This can happen if backtracking reached insn 0
@@ -3670,84 +3967,94 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r
        if (!st)
            break;

        new_marks = false;
        func = st->frame[frame];
        bitmap_from_u64(mask, reg_mask);
        for_each_set_bit(i, mask, 32) {
            reg = &func->regs[i];
            if (reg->type != SCALAR_VALUE) {
                reg_mask &= ~(1u << i);
                continue;
            }
            if (!reg->precise)
                new_marks = true;
            reg->precise = true;
        }

        bitmap_from_u64(mask, stack_mask);
        for_each_set_bit(i, mask, 64) {
            if (i >= func->allocated_stack / BPF_REG_SIZE) {
                /* the sequence of instructions:
                 * 2: (bf) r3 = r10
                 * 3: (7b) *(u64 *)(r3 -8) = r0
                 * 4: (79) r4 = *(u64 *)(r10 -8)
                 * doesn't contain jmps. It's backtracked
                 * as a single block.
                 * During backtracking insn 3 is not recognized as
                 * stack access, so at the end of backtracking
                 * stack slot fp-8 is still marked in stack_mask.
                 * However the parent state may not have accessed
                 * fp-8 and it's "unallocated" stack space.
                 * In such case fallback to conservative.
                 */
                mark_all_scalars_precise(env, st);
                return 0;
        for (fr = bt->frame; fr >= 0; fr--) {
            func = st->frame[fr];
            bitmap_from_u64(mask, bt_frame_reg_mask(bt, fr));
            for_each_set_bit(i, mask, 32) {
                reg = &func->regs[i];
                if (reg->type != SCALAR_VALUE) {
                    bt_clear_frame_reg(bt, fr, i);
                    continue;
                }
                if (reg->precise)
                    bt_clear_frame_reg(bt, fr, i);
                else
                    reg->precise = true;
            }

            if (!is_spilled_reg(&func->stack[i])) {
                stack_mask &= ~(1ull << i);
                continue;
            bitmap_from_u64(mask, bt_frame_stack_mask(bt, fr));
            for_each_set_bit(i, mask, 64) {
                if (i >= func->allocated_stack / BPF_REG_SIZE) {
                    /* the sequence of instructions:
                     * 2: (bf) r3 = r10
                     * 3: (7b) *(u64 *)(r3 -8) = r0
                     * 4: (79) r4 = *(u64 *)(r10 -8)
                     * doesn't contain jmps. It's backtracked
                     * as a single block.
                     * During backtracking insn 3 is not recognized as
                     * stack access, so at the end of backtracking
                     * stack slot fp-8 is still marked in stack_mask.
                     * However the parent state may not have accessed
                     * fp-8 and it's "unallocated" stack space.
                     * In such case fallback to conservative.
                     */
                    mark_all_scalars_precise(env, env->cur_state);
                    bt_reset(bt);
                    return 0;
                }

                if (!is_spilled_scalar_reg(&func->stack[i])) {
                    bt_clear_frame_slot(bt, fr, i);
                    continue;
                }
                reg = &func->stack[i].spilled_ptr;
                if (reg->precise)
                    bt_clear_frame_slot(bt, fr, i);
                else
                    reg->precise = true;
            }
            reg = &func->stack[i].spilled_ptr;
            if (reg->type != SCALAR_VALUE) {
                stack_mask &= ~(1ull << i);
                continue;
            if (env->log.level & BPF_LOG_LEVEL2) {
                fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN,
                             bt_frame_reg_mask(bt, fr));
                verbose(env, "mark_precise: frame%d: parent state regs=%s ",
                        fr, env->tmp_str_buf);
                fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN,
                               bt_frame_stack_mask(bt, fr));
                verbose(env, "stack=%s: ", env->tmp_str_buf);
                print_verifier_state(env, func, true);
            }
            if (!reg->precise)
                new_marks = true;
            reg->precise = true;
        }
        if (env->log.level & BPF_LOG_LEVEL2) {
            verbose(env, "parent %s regs=%x stack=%llx marks:",
                    new_marks ? "didn't have" : "already had",
                    reg_mask, stack_mask);
            print_verifier_state(env, func, true);
        }

        if (!reg_mask && !stack_mask)
            break;
        if (!new_marks)
            break;
        if (bt_empty(bt))
            return 0;

        last_idx = st->last_insn_idx;
        first_idx = st->first_insn_idx;
    }

    /* if we still have requested precise regs or slots, we missed
     * something (e.g., stack access through non-r10 register), so
     * fallback to marking all precise
     */
    if (!bt_empty(bt)) {
        mark_all_scalars_precise(env, env->cur_state);
        bt_reset(bt);
    }

    return 0;
}

int mark_chain_precision(struct bpf_verifier_env *env, int regno)
{
    return __mark_chain_precision(env, env->cur_state->curframe, regno, -1);
    return __mark_chain_precision(env, regno);
}

static int mark_chain_precision_frame(struct bpf_verifier_env *env, int frame, int regno)
/* mark_chain_precision_batch() assumes that env->bt is set in the caller to
 * desired reg and stack masks across all relevant frames
 */
static int mark_chain_precision_batch(struct bpf_verifier_env *env)
{
    return __mark_chain_precision(env, frame, regno, -1);
}

static int mark_chain_precision_stack_frame(struct bpf_verifier_env *env, int frame, int spi)
{
    return __mark_chain_precision(env, frame, -1, spi);
    return __mark_chain_precision(env, -1);
}

static bool is_spillable_regtype(enum bpf_reg_type type)
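The batch variant implies a calling convention: seed env->bt first, then run one combined backtracking pass. A fragment (sketch; error handling elided) of that pattern, which is exactly what the reworked propagate_precision() further down does:

    /* caller-side pattern for mark_chain_precision_batch() */
    bt_set_frame_reg(&env->bt, 0, BPF_REG_6); /* r6 in frame 0 needs precision */
    bt_set_frame_slot(&env->bt, 1, 0);        /* fp-8 in frame 1 needs precision */
    err = mark_chain_precision_batch(env);
    if (err < 0)
        return err;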
@@ -4109,6 +4416,7 @@ static void mark_reg_stack_read(struct bpf_verifier_env *env,
    for (i = min_off; i < max_off; i++) {
        slot = -i - 1;
        spi = slot / BPF_REG_SIZE;
        mark_stack_slot_scratched(env, spi);
        stype = ptr_state->stack[spi].slot_type;
        if (stype[slot % BPF_REG_SIZE] != STACK_ZERO)
            break;
@@ -4160,6 +4468,8 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
    stype = reg_state->stack[spi].slot_type;
    reg = &reg_state->stack[spi].spilled_ptr;

    mark_stack_slot_scratched(env, spi);

    if (is_spilled_reg(&reg_state->stack[spi])) {
        u8 spill_size = 1;

@@ -8220,17 +8530,13 @@ static int set_callee_state(struct bpf_verifier_env *env,
                            struct bpf_func_state *caller,
                            struct bpf_func_state *callee, int insn_idx);

static bool is_callback_calling_kfunc(u32 btf_id);

static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
                             int *insn_idx, int subprog,
                             set_callee_state_fn set_callee_state_cb)
{
    struct bpf_verifier_state *state = env->cur_state;
    struct bpf_func_info_aux *func_info_aux;
    struct bpf_func_state *caller, *callee;
    int err;
    bool is_global = false;

    if (state->curframe + 1 >= MAX_CALL_FRAMES) {
        verbose(env, "the call stack of %d frames is too deep\n",
@@ -8245,13 +8551,10 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
        return -EFAULT;
    }

    func_info_aux = env->prog->aux->func_info_aux;
    if (func_info_aux)
        is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
    err = btf_check_subprog_call(env, subprog, caller->regs);
    if (err == -EFAULT)
        return err;
    if (is_global) {
    if (subprog_is_global(env, subprog)) {
        if (err) {
            verbose(env, "Caller passes invalid args into func#%d\n",
                    subprog);
@@ -15181,20 +15484,25 @@ static int propagate_precision(struct bpf_verifier_env *env,
    struct bpf_reg_state *state_reg;
    struct bpf_func_state *state;
    int i, err = 0, fr;
    bool first;

    for (fr = old->curframe; fr >= 0; fr--) {
        state = old->frame[fr];
        state_reg = state->regs;
        first = true;
        for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
            if (state_reg->type != SCALAR_VALUE ||
                !state_reg->precise ||
                !(state_reg->live & REG_LIVE_READ))
                continue;
            if (env->log.level & BPF_LOG_LEVEL2)
                verbose(env, "frame %d: propagating r%d\n", fr, i);
            err = mark_chain_precision_frame(env, fr, i);
            if (err < 0)
                return err;
            if (env->log.level & BPF_LOG_LEVEL2) {
                if (first)
                    verbose(env, "frame %d: propagating r%d", fr, i);
                else
                    verbose(env, ",r%d", i);
            }
            bt_set_frame_reg(&env->bt, fr, i);
            first = false;
        }

        for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
@@ -15205,14 +15513,24 @@ static int propagate_precision(struct bpf_verifier_env *env,
                !state_reg->precise ||
                !(state_reg->live & REG_LIVE_READ))
                continue;
            if (env->log.level & BPF_LOG_LEVEL2)
                verbose(env, "frame %d: propagating fp%d\n",
                        fr, (-i - 1) * BPF_REG_SIZE);
            err = mark_chain_precision_stack_frame(env, fr, i);
            if (err < 0)
                return err;
            if (env->log.level & BPF_LOG_LEVEL2) {
                if (first)
                    verbose(env, "frame %d: propagating fp%d",
                            fr, (-i - 1) * BPF_REG_SIZE);
                else
                    verbose(env, ",fp%d", (-i - 1) * BPF_REG_SIZE);
            }
            bt_set_frame_slot(&env->bt, fr, i);
            first = false;
        }
        if (!first)
            verbose(env, "\n");
    }

    err = mark_chain_precision_batch(env);
    if (err < 0)
        return err;

    return 0;
}

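With the shared "first" flag carried across the register and stack loops, each frame now logs one consolidated line instead of one line (and one backtracking pass) per reg/slot. For example, assuming r1, r6 and fp-8 were precise and read in frame 0, the level-2 log would contain a single line of the form:

    frame 0: propagating r1,r6,fp-8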
@@ -18869,6 +19187,8 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
    if (!env)
        return -ENOMEM;

    env->bt.env = env;

    len = (*prog)->len;
    env->insn_aux_data =
        vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
@@ -55,6 +55,7 @@
#include "verifier_spill_fill.skel.h"
#include "verifier_spin_lock.skel.h"
#include "verifier_stack_ptr.skel.h"
#include "verifier_subprog_precision.skel.h"
#include "verifier_subreg.skel.h"
#include "verifier_uninit.skel.h"
#include "verifier_unpriv.skel.h"
@@ -154,6 +155,7 @@ void test_verifier_sock(void) { RUN(verifier_sock); }
void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); }
void test_verifier_spin_lock(void) { RUN(verifier_spin_lock); }
void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); }
void test_verifier_subprog_precision(void) { RUN(verifier_subprog_precision); }
void test_verifier_subreg(void) { RUN(verifier_subreg); }
void test_verifier_uninit(void) { RUN(verifier_uninit); }
void test_verifier_unpriv(void) { RUN(verifier_unpriv); }
@@ -86,6 +86,10 @@
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64

#ifndef __used
#define __used __attribute__((used))
#endif

#if defined(__TARGET_ARCH_x86)
#define SYSCALL_WRAPPER 1
#define SYS_PREFIX "__x64_"
@@ -651,29 +651,25 @@ int iter_stack_array_loop(const void *ctx)
    return sum;
}

#define ARR_SZ 16

static __noinline void fill(struct bpf_iter_num *it, int *arr, int mul)
static __noinline void fill(struct bpf_iter_num *it, int *arr, __u32 n, int mul)
{
    int *t;
    __u64 i;
    int *t, i;

    while ((t = bpf_iter_num_next(it))) {
        i = *t;
        if (i >= ARR_SZ)
        if (i >= n)
            break;
        arr[i] = i * mul;
    }
}

static __noinline int sum(struct bpf_iter_num *it, int *arr)
static __noinline int sum(struct bpf_iter_num *it, int *arr, __u32 n)
{
    int *t, sum = 0;;
    __u64 i;
    int *t, i, sum = 0;;

    while ((t = bpf_iter_num_next(it))) {
        i = *t;
        if (i >= ARR_SZ)
        if (i >= n)
            break;
        sum += arr[i];
    }
@@ -685,7 +681,7 @@ SEC("raw_tp")
__success
int iter_pass_iter_ptr_to_subprog(const void *ctx)
{
    int arr1[ARR_SZ], arr2[ARR_SZ];
    int arr1[16], arr2[32];
    struct bpf_iter_num it;
    int n, sum1, sum2;

@@ -694,25 +690,25 @@ int iter_pass_iter_ptr_to_subprog(const void *ctx)
    /* fill arr1 */
    n = ARRAY_SIZE(arr1);
    bpf_iter_num_new(&it, 0, n);
    fill(&it, arr1, 2);
    fill(&it, arr1, n, 2);
    bpf_iter_num_destroy(&it);

    /* fill arr2 */
    n = ARRAY_SIZE(arr2);
    bpf_iter_num_new(&it, 0, n);
    fill(&it, arr2, 10);
    fill(&it, arr2, n, 10);
    bpf_iter_num_destroy(&it);

    /* sum arr1 */
    n = ARRAY_SIZE(arr1);
    bpf_iter_num_new(&it, 0, n);
    sum1 = sum(&it, arr1);
    sum1 = sum(&it, arr1, n);
    bpf_iter_num_destroy(&it);

    /* sum arr2 */
    n = ARRAY_SIZE(arr2);
    bpf_iter_num_new(&it, 0, n);
    sum2 = sum(&it, arr2);
    sum2 = sum(&it, arr2, n);
    bpf_iter_num_destroy(&it);

    bpf_printk("sum1=%d, sum2=%d", sum1, sum2);
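The iters.c change drops the shared ARR_SZ bound in favor of a caller-supplied length and differently sized arrays, which exercises exactly the cross-frame precision propagation this series adds: the bound check inside the subprog must make the index precise relative to an argument that lives in the caller's frame. The underlying pattern, as a sketch (hypothetical helper, not part of the patch):

    #include <linux/types.h>
    #include <bpf/bpf_helpers.h>

    static __noinline int read_elem(int *arr, __u32 n, __u32 i)
    {
        if (i >= n) /* makes 'i' precise relative to the caller-provided bound */
            return 0;
        return arr[i];
    }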
tools/testing/selftests/bpf/progs/verifier_subprog_precision.c (new file, 536 lines)

@@ -0,0 +1,536 @@
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
|
||||
|
||||
#include <errno.h>
|
||||
#include <string.h>
|
||||
#include <linux/bpf.h>
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include "bpf_misc.h"
|
||||
|
||||
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
|
||||
|
||||
int vals[] SEC(".data.vals") = {1, 2, 3, 4};
|
||||
|
||||
__naked __noinline __used
|
||||
static unsigned long identity_subprog()
|
||||
{
|
||||
/* the simplest *static* 64-bit identity function */
|
||||
asm volatile (
|
||||
"r0 = r1;"
|
||||
"exit;"
|
||||
);
|
||||
}
|
||||
|
||||
__noinline __used
|
||||
unsigned long global_identity_subprog(__u64 x)
|
||||
{
|
||||
/* the simplest *global* 64-bit identity function */
|
||||
return x;
|
||||
}
|
||||
|
||||
__naked __noinline __used
|
||||
static unsigned long callback_subprog()
|
||||
{
|
||||
/* the simplest callback function */
|
||||
asm volatile (
|
||||
"r0 = 0;"
|
||||
"exit;"
|
||||
);
|
||||
}
|
||||
|
||||
SEC("?raw_tp")
|
||||
__success __log_level(2)
|
||||
__msg("7: (0f) r1 += r0")
|
||||
__msg("mark_precise: frame0: regs=r0 stack= before 6: (bf) r1 = r7")
|
||||
__msg("mark_precise: frame0: regs=r0 stack= before 5: (27) r0 *= 4")
|
||||
__msg("mark_precise: frame0: regs=r0 stack= before 11: (95) exit")
|
||||
__msg("mark_precise: frame1: regs=r0 stack= before 10: (bf) r0 = r1")
|
||||
__msg("mark_precise: frame1: regs=r1 stack= before 4: (85) call pc+5")
|
||||
__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r1 = r6")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
|
||||
__naked int subprog_result_precise(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r6 = 3;"
|
||||
/* pass r6 through r1 into subprog to get it back as r0;
|
||||
* this whole chain will have to be marked as precise later
|
||||
*/
|
||||
"r1 = r6;"
|
||||
"call identity_subprog;"
|
||||
/* now use subprog's returned value (which is a
|
||||
* r6 -> r1 -> r0 chain), as index into vals array, forcing
|
||||
* all of that to be known precisely
|
||||
*/
|
||||
"r0 *= 4;"
|
||||
"r1 = %[vals];"
|
||||
/* here r0->r1->r6 chain is forced to be precise and has to be
|
||||
* propagated back to the beginning, including through the
|
||||
* subprog call
|
||||
*/
|
||||
"r1 += r0;"
|
||||
"r0 = *(u32 *)(r1 + 0);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm_ptr(vals)
|
||||
: __clobber_common, "r6"
|
||||
);
|
||||
}
|
||||
|
||||
SEC("?raw_tp")
|
||||
__success __log_level(2)
|
||||
__msg("9: (0f) r1 += r0")
|
||||
__msg("mark_precise: frame0: last_idx 9 first_idx 0")
|
||||
__msg("mark_precise: frame0: regs=r0 stack= before 8: (bf) r1 = r7")
|
||||
__msg("mark_precise: frame0: regs=r0 stack= before 7: (27) r0 *= 4")
|
||||
__msg("mark_precise: frame0: regs=r0 stack= before 5: (a5) if r0 < 0x4 goto pc+1")
|
||||
__msg("mark_precise: frame0: regs=r0 stack= before 4: (85) call pc+7")
|
||||
__naked int global_subprog_result_precise(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r6 = 3;"
|
||||
/* pass r6 through r1 into subprog to get it back as r0;
|
||||
* given global_identity_subprog is global, precision won't
|
||||
* propagate all the way back to r6
|
||||
*/
|
||||
"r1 = r6;"
|
||||
"call global_identity_subprog;"
|
||||
/* now use subprog's returned value (which is unknown now, so
|
||||
* we need to clamp it), as index into vals array, forcing r0
|
||||
* to be marked precise (with no effect on r6, though)
|
||||
*/
|
||||
"if r0 < %[vals_arr_sz] goto 1f;"
|
||||
"r0 = %[vals_arr_sz] - 1;"
|
||||
"1:"
|
||||
"r0 *= 4;"
|
||||
"r1 = %[vals];"
|
||||
/* here r0 is forced to be precise and has to be
|
||||
* propagated back to the global subprog call, but it
|
||||
* shouldn't go all the way to mark r6 as precise
|
||||
*/
|
||||
"r1 += r0;"
|
||||
"r0 = *(u32 *)(r1 + 0);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm_ptr(vals),
|
||||
__imm_const(vals_arr_sz, ARRAY_SIZE(vals))
|
||||
: __clobber_common, "r6"
|
||||
);
|
||||
}
|
||||
|
||||
SEC("?raw_tp")
|
||||
__success __log_level(2)
|
||||
__msg("14: (0f) r1 += r6")
|
||||
__msg("mark_precise: frame0: last_idx 14 first_idx 10")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 11: (25) if r6 > 0x3 goto pc+4")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 10: (bf) r6 = r0")
|
||||
__msg("mark_precise: frame0: parent state regs=r0 stack=:")
|
||||
__msg("mark_precise: frame0: last_idx 18 first_idx 0")
|
||||
__msg("mark_precise: frame0: regs=r0 stack= before 18: (95) exit")
|
||||
__naked int callback_result_precise(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r6 = 3;"
|
||||
|
||||
/* call subprog and use result; r0 shouldn't propagate back to
|
||||
* callback_subprog
|
||||
*/
|
||||
"r1 = r6;" /* nr_loops */
|
||||
"r2 = %[callback_subprog];" /* callback_fn */
|
||||
"r3 = 0;" /* callback_ctx */
|
||||
"r4 = 0;" /* flags */
|
||||
"call %[bpf_loop];"
|
||||
|
||||
"r6 = r0;"
|
||||
"if r6 > 3 goto 1f;"
|
||||
"r6 *= 4;"
|
||||
"r1 = %[vals];"
|
||||
/* here r6 is forced to be precise and has to be propagated
|
||||
* back to the bpf_loop() call, but not beyond
|
||||
*/
|
||||
"r1 += r6;"
|
||||
"r0 = *(u32 *)(r1 + 0);"
|
||||
"1:"
|
||||
"exit;"
|
||||
:
|
||||
: __imm_ptr(vals),
|
||||
__imm_ptr(callback_subprog),
|
||||
__imm(bpf_loop)
|
||||
: __clobber_common, "r6"
|
||||
);
|
||||
}
|
||||
|
||||
SEC("?raw_tp")
|
||||
__success __log_level(2)
|
||||
__msg("7: (0f) r1 += r6")
|
||||
__msg("mark_precise: frame0: last_idx 7 first_idx 0")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 11: (95) exit")
|
||||
__msg("mark_precise: frame1: regs= stack= before 10: (bf) r0 = r1")
|
||||
__msg("mark_precise: frame1: regs= stack= before 4: (85) call pc+5")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
|
||||
__naked int parent_callee_saved_reg_precise(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r6 = 3;"
|
||||
|
||||
/* call subprog and ignore result; we need this call only to
|
||||
* complicate jump history
|
||||
*/
|
||||
"r1 = 0;"
|
||||
"call identity_subprog;"
|
||||
|
||||
"r6 *= 4;"
|
||||
"r1 = %[vals];"
|
||||
/* here r6 is forced to be precise and has to be propagated
|
||||
* back to the beginning, handling (and ignoring) subprog call
|
||||
*/
|
||||
"r1 += r6;"
|
||||
"r0 = *(u32 *)(r1 + 0);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm_ptr(vals)
|
||||
: __clobber_common, "r6"
|
||||
);
|
||||
}
|
||||
|
||||
SEC("?raw_tp")
|
||||
__success __log_level(2)
|
||||
__msg("7: (0f) r1 += r6")
|
||||
__msg("mark_precise: frame0: last_idx 7 first_idx 0")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 4: (85) call pc+5")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
|
||||
__naked int parent_callee_saved_reg_precise_global(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r6 = 3;"
|
||||
|
||||
/* call subprog and ignore result; we need this call only to
|
||||
* complicate jump history
|
||||
*/
|
||||
"r1 = 0;"
|
||||
"call global_identity_subprog;"
|
||||
|
||||
"r6 *= 4;"
|
||||
"r1 = %[vals];"
|
||||
/* here r6 is forced to be precise and has to be propagated
|
||||
* back to the beginning, handling (and ignoring) subprog call
|
||||
*/
|
||||
"r1 += r6;"
|
||||
"r0 = *(u32 *)(r1 + 0);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm_ptr(vals)
|
||||
: __clobber_common, "r6"
|
||||
);
|
||||
}
|
||||
|
||||
SEC("?raw_tp")
|
||||
__success __log_level(2)
|
||||
__msg("12: (0f) r1 += r6")
|
||||
__msg("mark_precise: frame0: last_idx 12 first_idx 10")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 11: (bf) r1 = r7")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 10: (27) r6 *= 4")
|
||||
__msg("mark_precise: frame0: parent state regs=r6 stack=:")
|
||||
__msg("mark_precise: frame0: last_idx 16 first_idx 0")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 16: (95) exit")
|
||||
__msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0")
|
||||
__msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop#181")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 8: (b7) r4 = 0")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 7: (b7) r3 = 0")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r2 = r8")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 5: (b7) r1 = 1")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
|
||||
__naked int parent_callee_saved_reg_precise_with_callback(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r6 = 3;"
|
||||
|
||||
/* call subprog and ignore result; we need this call only to
|
||||
* complicate jump history
|
||||
*/
|
||||
"r1 = 1;" /* nr_loops */
|
||||
"r2 = %[callback_subprog];" /* callback_fn */
|
||||
"r3 = 0;" /* callback_ctx */
|
||||
"r4 = 0;" /* flags */
|
||||
"call %[bpf_loop];"
|
||||
|
||||
"r6 *= 4;"
|
||||
"r1 = %[vals];"
|
||||
/* here r6 is forced to be precise and has to be propagated
|
||||
* back to the beginning, handling (and ignoring) callback call
|
||||
*/
|
||||
"r1 += r6;"
|
||||
"r0 = *(u32 *)(r1 + 0);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm_ptr(vals),
|
||||
__imm_ptr(callback_subprog),
|
||||
__imm(bpf_loop)
|
||||
: __clobber_common, "r6"
|
||||
);
|
||||
}
|
||||
|
||||
SEC("?raw_tp")
|
||||
__success __log_level(2)
|
||||
__msg("9: (0f) r1 += r6")
|
||||
__msg("mark_precise: frame0: last_idx 9 first_idx 6")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)")
|
||||
__msg("mark_precise: frame0: parent state regs= stack=-8:")
|
||||
__msg("mark_precise: frame0: last_idx 13 first_idx 0")
|
||||
__msg("mark_precise: frame0: regs= stack=-8 before 13: (95) exit")
|
||||
__msg("mark_precise: frame1: regs= stack= before 12: (bf) r0 = r1")
|
||||
__msg("mark_precise: frame1: regs= stack= before 5: (85) call pc+6")
|
||||
__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0")
|
||||
__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
|
||||
__naked int parent_stack_slot_precise(void)
|
||||
{
|
||||
asm volatile (
|
||||
/* spill reg */
|
||||
"r6 = 3;"
|
||||
"*(u64 *)(r10 - 8) = r6;"
|
||||
|
||||
/* call subprog and ignore result; we need this call only to
|
||||
* complicate jump history
|
||||
*/
|
||||
"r1 = 0;"
|
||||
"call identity_subprog;"
|
||||
|
||||
/* restore reg from stack; in this case we'll be carrying
|
||||
* stack mask when going back into subprog through jump
|
||||
* history
|
||||
*/
|
||||
"r6 = *(u64 *)(r10 - 8);"
|
||||
|
||||
"r6 *= 4;"
|
||||
"r1 = %[vals];"
|
||||
/* here r6 is forced to be precise and has to be propagated
|
||||
* back to the beginning, handling (and ignoring) subprog call
|
||||
*/
|
||||
"r1 += r6;"
|
||||
"r0 = *(u32 *)(r1 + 0);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm_ptr(vals)
|
||||
: __clobber_common, "r6"
|
||||
);
|
||||
}
|
||||
|
||||
SEC("?raw_tp")
|
||||
__success __log_level(2)
|
||||
__msg("9: (0f) r1 += r6")
|
||||
__msg("mark_precise: frame0: last_idx 9 first_idx 6")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)")
|
||||
__msg("mark_precise: frame0: parent state regs= stack=-8:")
|
||||
__msg("mark_precise: frame0: last_idx 5 first_idx 0")
|
||||
__msg("mark_precise: frame0: regs= stack=-8 before 5: (85) call pc+6")
|
||||
__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0")
|
||||
__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
|
||||
__naked int parent_stack_slot_precise_global(void)
|
||||
{
|
||||
asm volatile (
|
||||
/* spill reg */
|
||||
"r6 = 3;"
|
||||
"*(u64 *)(r10 - 8) = r6;"
|
||||
|
||||
/* call subprog and ignore result; we need this call only to
|
||||
* complicate jump history
|
||||
*/
|
||||
"r1 = 0;"
|
||||
"call global_identity_subprog;"
|
||||
|
||||
/* restore reg from stack; in this case we'll be carrying
|
||||
* stack mask when going back into subprog through jump
|
||||
* history
|
||||
*/
|
||||
"r6 = *(u64 *)(r10 - 8);"
|
||||
|
||||
"r6 *= 4;"
|
||||
"r1 = %[vals];"
|
||||
/* here r6 is forced to be precise and has to be propagated
|
||||
* back to the beginning, handling (and ignoring) subprog call
|
||||
*/
|
||||
"r1 += r6;"
|
||||
"r0 = *(u32 *)(r1 + 0);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm_ptr(vals)
|
||||
: __clobber_common, "r6"
|
||||
);
|
||||
}
|
||||
|
||||
SEC("?raw_tp")
|
||||
__success __log_level(2)
|
||||
__msg("14: (0f) r1 += r6")
|
||||
__msg("mark_precise: frame0: last_idx 14 first_idx 11")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 11: (79) r6 = *(u64 *)(r10 -8)")
|
||||
__msg("mark_precise: frame0: parent state regs= stack=-8:")
|
||||
__msg("mark_precise: frame0: last_idx 18 first_idx 0")
|
||||
__msg("mark_precise: frame0: regs= stack=-8 before 18: (95) exit")
|
||||
__msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0")
|
||||
__msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181")
|
||||
__msg("mark_precise: frame0: regs= stack=-8 before 9: (b7) r4 = 0")
|
||||
__msg("mark_precise: frame0: regs= stack=-8 before 8: (b7) r3 = 0")
|
||||
__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r2 = r8")
|
||||
__msg("mark_precise: frame0: regs= stack=-8 before 6: (bf) r1 = r6")
|
||||
__msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -8) = r6")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
|
||||
__naked int parent_stack_slot_precise_with_callback(void)
|
||||
{
|
||||
asm volatile (
|
||||
/* spill reg */
|
||||
"r6 = 3;"
|
||||
"*(u64 *)(r10 - 8) = r6;"
|
||||
|
||||
/* ensure we have callback frame in jump history */
|
||||
"r1 = r6;" /* nr_loops */
|
||||
"r2 = %[callback_subprog];" /* callback_fn */
|
||||
"r3 = 0;" /* callback_ctx */
|
||||
"r4 = 0;" /* flags */
|
||||
"call %[bpf_loop];"
|
||||
|
||||
/* restore reg from stack; in this case we'll be carrying
|
||||
* stack mask when going back into subprog through jump
|
||||
* history
|
||||
*/
|
||||
"r6 = *(u64 *)(r10 - 8);"
|
||||
|
||||
"r6 *= 4;"
|
||||
"r1 = %[vals];"
|
||||
/* here r6 is forced to be precise and has to be propagated
|
||||
* back to the beginning, handling (and ignoring) subprog call
|
||||
*/
|
||||
"r1 += r6;"
|
||||
"r0 = *(u32 *)(r1 + 0);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm_ptr(vals),
|
||||
__imm_ptr(callback_subprog),
|
||||
__imm(bpf_loop)
|
||||
: __clobber_common, "r6"
|
||||
);
|
||||
}
|
||||
|
||||
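The callback_subprog passed to bpf_loop above is likewise defined earlier in the file. For reference, a bpf_loop callback receives the iteration index and the callback_ctx pointer, and returns 0 to continue or 1 to stop; a minimal no-op sketch (an assumption about the shape, not the file's actual definition):

/* minimal bpf_loop callback sketch: invoked once per iteration;
 * returning 0 continues the loop, 1 breaks out early
 */
static __u64 callback_subprog(__u64 index, void *ctx)
{
	return 0;
}
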
__noinline __used
static __u64 subprog_with_precise_arg(__u64 x)
{
	return vals[x]; /* x is forced to be precise */
}

SEC("?raw_tp")
|
||||
__success __log_level(2)
|
||||
__msg("8: (0f) r2 += r1")
|
||||
__msg("mark_precise: frame1: last_idx 8 first_idx 0")
|
||||
__msg("mark_precise: frame1: regs=r1 stack= before 6: (18) r2 = ")
|
||||
__msg("mark_precise: frame1: regs=r1 stack= before 5: (67) r1 <<= 2")
|
||||
__msg("mark_precise: frame1: regs=r1 stack= before 2: (85) call pc+2")
|
||||
__msg("mark_precise: frame0: regs=r1 stack= before 1: (bf) r1 = r6")
|
||||
__msg("mark_precise: frame0: regs=r6 stack= before 0: (b7) r6 = 3")
|
||||
__naked int subprog_arg_precise(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r6 = 3;"
|
||||
"r1 = r6;"
|
||||
/* subprog_with_precise_arg expects its argument to be
|
||||
* precise, so r1->r6 will be marked precise from inside the
|
||||
* subprog
|
||||
*/
|
||||
"call subprog_with_precise_arg;"
|
||||
"r0 += r6;"
|
||||
"exit;"
|
||||
:
|
||||
:
|
||||
: __clobber_common, "r6"
|
||||
);
|
||||
}
|
||||
|
||||
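The __msg() annotations in these tests are matched against the verifier log produced at log level 2 (__log_level(2)). Outside the test harness, a comparable log can be captured with libbpf; a minimal sketch for a single-program object (the object file name is hypothetical):

#include <stdio.h>
#include <bpf/libbpf.h>

static char log_buf[1024 * 1024];

int dump_precision_log(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int err;

	obj = bpf_object__open_file("verifier_subprog_precision.bpf.o", NULL);
	if (!obj)
		return -1;

	bpf_object__for_each_program(prog, obj) {
		/* level 2 includes per-instruction state dumps and the
		 * mark_precise: backtracking lines checked above
		 */
		bpf_program__set_log_level(prog, 2);
		bpf_program__set_log_buf(prog, log_buf, sizeof(log_buf));
	}

	err = bpf_object__load(obj);
	printf("%s", log_buf); /* log of the last-loaded program */
	bpf_object__close(obj);
	return err;
}
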
/* r1 is a pointer to a stack slot;
 * r2 is a register to spill into that slot;
 * the subprog also spills r2 into its own stack slot
 */
__naked __noinline __used
static __u64 subprog_spill_reg_precise(void)
{
	asm volatile (
		/* spill to parent stack */
		"*(u64 *)(r1 + 0) = r2;"
		/* spill to subprog stack (we use -16 offset to avoid
		 * accidental confusion with parent's -8 stack slot in
		 * verifier log output)
		 */
		"*(u64 *)(r10 - 16) = r2;"
		/* use both spills as return result to propagate precision everywhere */
		"r0 = *(u64 *)(r10 - 16);"
		"r2 = *(u64 *)(r1 + 0);"
		"r0 += r2;"
		"exit;"
	);
}

SEC("?raw_tp")
|
||||
__success __log_level(2)
|
||||
/* precision backtracking can't currently handle stack access not through r10,
|
||||
* so we won't be able to mark stack slot fp-8 as precise, and so will
|
||||
* fallback to forcing all as precise
|
||||
*/
|
||||
__msg("mark_precise: frame0: falling back to forcing all scalars precise")
|
||||
__naked int subprog_spill_into_parent_stack_slot_precise(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r6 = 1;"
|
||||
|
||||
/* pass pointer to stack slot and r6 to subprog;
|
||||
* r6 will be marked precise and spilled into fp-8 slot, which
|
||||
* also should be marked precise
|
||||
*/
|
||||
"r1 = r10;"
|
||||
"r1 += -8;"
|
||||
"r2 = r6;"
|
||||
"call subprog_spill_reg_precise;"
|
||||
|
||||
/* restore reg from stack; in this case we'll be carrying
|
||||
* stack mask when going back into subprog through jump
|
||||
* history
|
||||
*/
|
||||
"r7 = *(u64 *)(r10 - 8);"
|
||||
|
||||
"r7 *= 4;"
|
||||
"r1 = %[vals];"
|
||||
/* here r7 is forced to be precise and has to be propagated
|
||||
* back to the beginning, handling subprog call and logic
|
||||
*/
|
||||
"r1 += r7;"
|
||||
"r0 = *(u32 *)(r1 + 0);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm_ptr(vals)
|
||||
: __clobber_common, "r6", "r7"
|
||||
);
|
||||
}
|
||||
|
||||
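For contrast with the fallback above: when the spill and fill both go through r10 in the same frame, as in the earlier tests, backtracking can attribute the stack slot precisely and no global fallback is triggered. A hypothetical variant (not part of the test file) that avoids the subprog spill entirely:

/* hypothetical variant: same computation, but the spill stays in the
 * caller and goes through r10, which precision backtracking handles
 */
SEC("?raw_tp")
__naked int r10_spill_variant(void)
{
	asm volatile (
		"r6 = 1;"
		"*(u64 *)(r10 - 8) = r6;"	/* r10-based spill: trackable */
		"r7 = *(u64 *)(r10 - 8);"
		"r7 *= 4;"
		"r1 = %[vals];"
		"r1 += r7;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6", "r7"
	);
}
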
__naked __noinline __used
static __u64 subprog_with_checkpoint(void)
{
	asm volatile (
		"r0 = 0;"
		/* guaranteed checkpoint if BPF_F_TEST_STATE_FREQ is used */
		"goto +0;"
		"exit;"
	);
}

char _license[] SEC("license") = "GPL";

@ -38,25 +38,24 @@
	.fixup_map_array_48b = { 1 },
	.result = VERBOSE_ACCEPT,
	.errstr =
	"26: (85) call bpf_probe_read_kernel#113\
	last_idx 26 first_idx 20\
	regs=4 stack=0 before 25\
	regs=4 stack=0 before 24\
	regs=4 stack=0 before 23\
	regs=4 stack=0 before 22\
	regs=4 stack=0 before 20\
	parent didn't have regs=4 stack=0 marks\
	last_idx 19 first_idx 10\
	regs=4 stack=0 before 19\
	regs=200 stack=0 before 18\
	regs=300 stack=0 before 17\
	regs=201 stack=0 before 15\
	regs=201 stack=0 before 14\
	regs=200 stack=0 before 13\
	regs=200 stack=0 before 12\
	regs=200 stack=0 before 11\
	regs=200 stack=0 before 10\
	parent already had regs=0 stack=0 marks",
	"mark_precise: frame0: last_idx 26 first_idx 20\
	mark_precise: frame0: regs=r2 stack= before 25\
	mark_precise: frame0: regs=r2 stack= before 24\
	mark_precise: frame0: regs=r2 stack= before 23\
	mark_precise: frame0: regs=r2 stack= before 22\
	mark_precise: frame0: regs=r2 stack= before 20\
	mark_precise: frame0: parent state regs=r2 stack=:\
	mark_precise: frame0: last_idx 19 first_idx 10\
	mark_precise: frame0: regs=r2 stack= before 19\
	mark_precise: frame0: regs=r9 stack= before 18\
	mark_precise: frame0: regs=r8,r9 stack= before 17\
	mark_precise: frame0: regs=r0,r9 stack= before 15\
	mark_precise: frame0: regs=r0,r9 stack= before 14\
	mark_precise: frame0: regs=r9 stack= before 13\
	mark_precise: frame0: regs=r9 stack= before 12\
	mark_precise: frame0: regs=r9 stack= before 11\
	mark_precise: frame0: regs=r9 stack= before 10\
	mark_precise: frame0: parent state regs= stack=:",
	},
	{
	"precise: test 2",
@ -100,20 +99,20 @@
	.flags = BPF_F_TEST_STATE_FREQ,
	.errstr =
	"26: (85) call bpf_probe_read_kernel#113\
	last_idx 26 first_idx 22\
	regs=4 stack=0 before 25\
	regs=4 stack=0 before 24\
	regs=4 stack=0 before 23\
	regs=4 stack=0 before 22\
	parent didn't have regs=4 stack=0 marks\
	last_idx 20 first_idx 20\
	regs=4 stack=0 before 20\
	parent didn't have regs=4 stack=0 marks\
	last_idx 19 first_idx 17\
	regs=4 stack=0 before 19\
	regs=200 stack=0 before 18\
	regs=300 stack=0 before 17\
	parent already had regs=0 stack=0 marks",
	mark_precise: frame0: last_idx 26 first_idx 22\
	mark_precise: frame0: regs=r2 stack= before 25\
	mark_precise: frame0: regs=r2 stack= before 24\
	mark_precise: frame0: regs=r2 stack= before 23\
	mark_precise: frame0: regs=r2 stack= before 22\
	mark_precise: frame0: parent state regs=r2 stack=:\
	mark_precise: frame0: last_idx 20 first_idx 20\
	mark_precise: frame0: regs=r2 stack= before 20\
	mark_precise: frame0: parent state regs=r2 stack=:\
	mark_precise: frame0: last_idx 19 first_idx 17\
	mark_precise: frame0: regs=r2 stack= before 19\
	mark_precise: frame0: regs=r9 stack= before 18\
	mark_precise: frame0: regs=r8,r9 stack= before 17\
	mark_precise: frame0: parent state regs= stack=:",
	},
	{
	"precise: cross frame pruning",
@ -153,15 +152,16 @@
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = BPF_F_TEST_STATE_FREQ,
	.errstr = "5: (2d) if r4 > r0 goto pc+0\
	last_idx 5 first_idx 5\
	parent didn't have regs=10 stack=0 marks\
	last_idx 4 first_idx 2\
	regs=10 stack=0 before 4\
	regs=10 stack=0 before 3\
	regs=0 stack=1 before 2\
	last_idx 5 first_idx 5\
	parent didn't have regs=1 stack=0 marks",
	.errstr = "mark_precise: frame0: last_idx 5 first_idx 5\
	mark_precise: frame0: parent state regs=r4 stack=:\
	mark_precise: frame0: last_idx 4 first_idx 2\
	mark_precise: frame0: regs=r4 stack= before 4\
	mark_precise: frame0: regs=r4 stack= before 3\
	mark_precise: frame0: regs= stack=-8 before 2\
	mark_precise: frame0: falling back to forcing all scalars precise\
	force_precise: frame0: forcing r0 to be precise\
	mark_precise: frame0: last_idx 5 first_idx 5\
	mark_precise: frame0: parent state regs= stack=:",
	.result = VERBOSE_ACCEPT,
	.retval = -1,
	},
@ -179,16 +179,19 @@
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = BPF_F_TEST_STATE_FREQ,
	.errstr = "last_idx 6 first_idx 6\
	parent didn't have regs=10 stack=0 marks\
	last_idx 5 first_idx 3\
	regs=10 stack=0 before 5\
	regs=10 stack=0 before 4\
	regs=0 stack=1 before 3\
	last_idx 6 first_idx 6\
	parent didn't have regs=1 stack=0 marks\
	last_idx 5 first_idx 3\
	regs=1 stack=0 before 5",
	.errstr = "mark_precise: frame0: last_idx 6 first_idx 6\
	mark_precise: frame0: parent state regs=r4 stack=:\
	mark_precise: frame0: last_idx 5 first_idx 3\
	mark_precise: frame0: regs=r4 stack= before 5\
	mark_precise: frame0: regs=r4 stack= before 4\
	mark_precise: frame0: regs= stack=-8 before 3\
	mark_precise: frame0: falling back to forcing all scalars precise\
	force_precise: frame0: forcing r0 to be precise\
	force_precise: frame0: forcing r0 to be precise\
	force_precise: frame0: forcing r0 to be precise\
	force_precise: frame0: forcing r0 to be precise\
	mark_precise: frame0: last_idx 6 first_idx 6\
	mark_precise: frame0: parent state regs= stack=:",
	.result = VERBOSE_ACCEPT,
	.retval = -1,
	},

@ -141,6 +141,7 @@ static struct env {
	bool verbose;
	bool debug;
	bool quiet;
	bool force_checkpoints;
	enum resfmt out_fmt;
	bool show_version;
	bool comparison_mode;
@ -209,6 +210,8 @@ static const struct argp_option opts[] = {
	{ "log-level", 'l', "LEVEL", 0, "Verifier log level (default 0 for normal mode, 1 for verbose mode)" },
	{ "log-fixed", OPT_LOG_FIXED, NULL, 0, "Disable verifier log rotation" },
	{ "log-size", OPT_LOG_SIZE, "BYTES", 0, "Customize verifier log size (default to 16MB)" },
	{ "test-states", 't', NULL, 0,
	  "Force frequent BPF verifier state checkpointing (set BPF_F_TEST_STATE_FREQ program flag)" },
	{ "quiet", 'q', NULL, 0, "Quiet mode" },
	{ "emit", 'e', "SPEC", 0, "Specify stats to be emitted" },
	{ "sort", 's', "SPEC", 0, "Specify sort order" },
@ -284,6 +287,9 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
			argp_usage(state);
		}
		break;
	case 't':
		env.force_checkpoints = true;
		break;
	case 'C':
		env.comparison_mode = true;
		break;
@ -989,6 +995,9 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf
	/* increase chances of successful BPF object loading */
	fixup_obj(obj, prog, base_filename);

	if (env.force_checkpoints)
		bpf_program__set_flags(prog, bpf_program__flags(prog) | BPF_F_TEST_STATE_FREQ);

	err = bpf_object__load(obj);
	env.progs_processed++;
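
With this change, frequent state checkpointing can be requested from the veristat command line. Based on the argp options added above, an invocation would look something like `veristat -t -l 2 prog.bpf.o` (the object file name is hypothetical): `-t` sets BPF_F_TEST_STATE_FREQ on each program before bpf_object__load(), as the process_prog() hunk shows, mirroring what flagged selftests get automatically.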