Mirror of https://github.com/torvalds/linux.git
bpf: extend is_branch_taken to registers
This patch extends the is_branch_taken() logic from JMP+K instructions to JMP+X instructions. Conditional branches are often performed when the src and dst registers contain known scalars. In such cases the verifier can follow the branch that is going to be taken when the program executes. That speeds up verification and is an essential feature for supporting bounded loops.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
This commit is contained in:
parent fc559a70d5
commit fb8d251ee2
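To illustrate the idea behind the change, here is a minimal userspace sketch of the verdict the verifier can now reach for a register-register (JMP+X) comparison once both operands are known constants. It is not the kernel's is_branch_taken(); the branch_taken_const() helper and its opcode encoding are hypothetical and only model a few comparison kinds.

/* Minimal userspace sketch, not the kernel implementation: models the
 * prediction made for a JMP+X conditional jump whose operands are both
 * known 64-bit scalars.
 */
#include <stdint.h>
#include <stdio.h>

enum { PRED_UNKNOWN = -1, PRED_NOT_TAKEN = 0, PRED_TAKEN = 1 };

/* Hypothetical helper mirroring the idea of is_branch_taken() for two
 * known scalars; only three comparison kinds are modeled here. */
static int branch_taken_const(uint64_t dst, uint64_t src, int op)
{
	switch (op) {
	case 0: return dst == src ? PRED_TAKEN : PRED_NOT_TAKEN; /* like BPF_JEQ */
	case 1: return dst >  src ? PRED_TAKEN : PRED_NOT_TAKEN; /* like BPF_JGT */
	case 2: return dst <  src ? PRED_TAKEN : PRED_NOT_TAKEN; /* like BPF_JLT */
	default: return PRED_UNKNOWN; /* unmodeled op: both paths must be verified */
	}
}

int main(void)
{
	/* r1 = 7, r2 = 5: "if r1 > r2 goto ..." is always taken, so only
	 * the jump target needs to be explored by the verifier. */
	printf("pred = %d\n", branch_taken_const(7, 5, 1));
	return 0;
}

A result of -1 corresponds to the "pred = -1" default introduced by the patch: when either operand is not a known scalar, both the goto and the fall-through state are still pushed and verified.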
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5266,9 +5266,10 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 	struct bpf_verifier_state *this_branch = env->cur_state;
 	struct bpf_verifier_state *other_branch;
 	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
-	struct bpf_reg_state *dst_reg, *other_branch_regs;
+	struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
 	u8 opcode = BPF_OP(insn->code);
 	bool is_jmp32;
+	int pred = -1;
 	int err;
 
 	/* Only conditional jumps are expected to reach here. */
@@ -5293,6 +5294,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 				insn->src_reg);
 			return -EACCES;
 		}
+		src_reg = &regs[insn->src_reg];
 	} else {
 		if (insn->src_reg != BPF_REG_0) {
 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
@@ -5308,20 +5310,22 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 	dst_reg = &regs[insn->dst_reg];
 	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
 
-	if (BPF_SRC(insn->code) == BPF_K) {
-		int pred = is_branch_taken(dst_reg, insn->imm, opcode,
-					   is_jmp32);
-
-		if (pred == 1) {
-			/* only follow the goto, ignore fall-through */
-			*insn_idx += insn->off;
-			return 0;
-		} else if (pred == 0) {
-			/* only follow fall-through branch, since
-			 * that's where the program will go
-			 */
-			return 0;
-		}
+	if (BPF_SRC(insn->code) == BPF_K)
+		pred = is_branch_taken(dst_reg, insn->imm,
+				       opcode, is_jmp32);
+	else if (src_reg->type == SCALAR_VALUE &&
+		 tnum_is_const(src_reg->var_off))
+		pred = is_branch_taken(dst_reg, src_reg->var_off.value,
+				       opcode, is_jmp32);
+	if (pred == 1) {
+		/* only follow the goto, ignore fall-through */
+		*insn_idx += insn->off;
+		return 0;
+	} else if (pred == 0) {
+		/* only follow fall-through branch, since
+		 * that's where the program will go
+		 */
+		return 0;
 	}
 
 	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
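For a concrete picture of what the new "else if (src_reg->type == SCALAR_VALUE && tnum_is_const(src_reg->var_off))" path enables, here is an illustrative instruction sequence (not from the commit), written with the insn-building macros from the kernel tree's tools/include/linux/filter.h; the include path is assumed to be set up as in the BPF selftests.

#include <linux/bpf.h>
#include <linux/filter.h>	/* BPF_MOV64_IMM, BPF_JMP_REG, BPF_EXIT_INSN */

/* Both jump operands are known scalars, so after this patch the verifier
 * predicts the BPF_JGT as always taken and never explores the fall-through.
 */
static struct bpf_insn prog[] = {
	BPF_MOV64_IMM(BPF_REG_1, 7),			/* r1 = 7, known scalar */
	BPF_MOV64_IMM(BPF_REG_2, 5),			/* r2 = 5, known scalar */
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 2),	/* if r1 > r2 goto +2: always taken */
	BPF_MOV64_IMM(BPF_REG_0, 0),			/* fall-through, no longer verified */
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 1),			/* jump target, the only path explored */
	BPF_EXIT_INSN(),
};

Before this change only the BPF_K (register-immediate) form of such a comparison could be predicted; register-register comparisons like the one above always forked verification into both branches.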