bpf: generalize reg_set_min_max() to handle two sets of two registers

Change reg_set_min_max() to take FALSE/TRUE sets of two registers each,
instead of assuming that we are always comparing to a constant. For now
we still assume that right-hand side registers are constants (and make
sure that's the case by swapping src/dst regs, if necessary), but
subsequent patches will remove this limitation.
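
To illustrate the swap step: when the constant ends up on the left-hand
side, the comparison is normalized before any bounds logic runs. The
snippet below just restates what the patch does; flip_opcode() is the
existing verifier helper that maps BPF_JGT <-> BPF_JLT, BPF_JGE <->
BPF_JLE (and their signed variants), and leaves BPF_JEQ/BPF_JNE/BPF_JSET
untouched:

	/* normalize "const OP var" into "var OP' const", so the rest of
	 * reg_set_min_max() only ever sees a constant on the right
	 */
	if (!is_reg_const(false_reg2, is_jmp32)) {
		opcode = flip_opcode(opcode);	/* e.g. BPF_JGT <-> BPF_JLT */
		swap(true_reg1, true_reg2);
		swap(false_reg1, false_reg2);
	}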

reg_set_min_max() is now called unconditionally for any register
comparison, which might include pointer vs pointer comparisons. This
makes it consistent with the generality of is_branch_taken(). But we
currently only support adjustments based on SCALAR vs SCALAR
comparisons, so reg_set_min_max() has to guard itself against pointers.

Taking two-by-two registers allows us to further unify and simplify
check_cond_jmp_op() logic. We utilize a fake register for the BPF_K
conditional jump case, just like with the is_branch_taken() part.
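
For reference, the fake register mentioned above is not part of this
diff; it is an on-stack SCALAR_VALUE prepared earlier in
check_cond_jmp_op() (by a previous patch in this series), roughly like
this, using the existing __mark_reg_known() helper (paraphrased sketch,
not an exact quote of that code):

	struct bpf_reg_state fake_reg = {};

	/* BPF_K case: pretend insn->imm is a known-constant scalar register,
	 * so both is_branch_taken() and reg_set_min_max() can treat BPF_K
	 * exactly like BPF_X
	 */
	src_reg = &fake_reg;
	src_reg->type = SCALAR_VALUE;
	__mark_reg_known(src_reg, insn->imm);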

Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20231102033759.2541186-18-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

@@ -14400,32 +14400,50 @@ static int is_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_state *reg
 	return is_scalar_branch_taken(reg1, reg2, opcode, is_jmp32);
 }
 
-/* Adjusts the register min/max values in the case that the dst_reg is the
- * variable register that we are working on, and src_reg is a constant or we're
- * simply doing a BPF_K check.
- * In JEQ/JNE cases we also adjust the var_off values.
+/* Adjusts the register min/max values in the case that the dst_reg and
+ * src_reg are both SCALAR_VALUE registers (or we are simply doing a BPF_K
+ * check, in which case we have a fake SCALAR_VALUE representing insn->imm).
+ * Technically we can do similar adjustments for pointers to the same object,
+ * but we don't support that right now.
  */
 static void reg_set_min_max(struct bpf_reg_state *true_reg1,
+			    struct bpf_reg_state *true_reg2,
 			    struct bpf_reg_state *false_reg1,
-			    u64 uval, u32 uval32,
+			    struct bpf_reg_state *false_reg2,
 			    u8 opcode, bool is_jmp32)
 {
-	struct tnum false_32off = tnum_subreg(false_reg1->var_off);
-	struct tnum false_64off = false_reg1->var_off;
-	struct tnum true_32off = tnum_subreg(true_reg1->var_off);
-	struct tnum true_64off = true_reg1->var_off;
-	s64 sval = (s64)uval;
-	s32 sval32 = (s32)uval32;
+	struct tnum false_32off, false_64off;
+	struct tnum true_32off, true_64off;
+	u64 uval;
+	u32 uval32;
+	s64 sval;
+	s32 sval32;
 
-	/* If the dst_reg is a pointer, we can't learn anything about its
-	 * variable offset from the compare (unless src_reg were a pointer into
-	 * the same object, but we don't bother with that.
-	 * Since false_reg1 and true_reg1 have the same type by construction, we
-	 * only need to check one of them for pointerness.
+	/* If either register is a pointer, we can't learn anything about its
+	 * variable offset from the compare (unless they were a pointer into
+	 * the same object, but we don't bother with that).
 	 */
-	if (__is_pointer_value(false, false_reg1))
+	if (false_reg1->type != SCALAR_VALUE || false_reg2->type != SCALAR_VALUE)
 		return;
 
+	/* we expect right-hand registers (src ones) to be constants, for now */
+	if (!is_reg_const(false_reg2, is_jmp32)) {
+		opcode = flip_opcode(opcode);
+		swap(true_reg1, true_reg2);
+		swap(false_reg1, false_reg2);
+	}
+	if (!is_reg_const(false_reg2, is_jmp32))
+		return;
+
+	false_32off = tnum_subreg(false_reg1->var_off);
+	false_64off = false_reg1->var_off;
+	true_32off = tnum_subreg(true_reg1->var_off);
+	true_64off = true_reg1->var_off;
+	uval = false_reg2->var_off.value;
+	uval32 = (u32)tnum_subreg(false_reg2->var_off).value;
+	sval = (s64)uval;
+	sval32 = (s32)uval32;
+
 	switch (opcode) {
 	/* JEQ/JNE comparison doesn't change the register equivalence.
 	 *
@@ -14562,22 +14580,6 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg1,
 	}
 }
 
-/* Same as above, but for the case that dst_reg holds a constant and src_reg is
- * the variable reg.
- */
-static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
-				struct bpf_reg_state *false_reg,
-				u64 uval, u32 uval32,
-				u8 opcode, bool is_jmp32)
-{
-	opcode = flip_opcode(opcode);
-	/* This uses zero as "not present in table"; luckily the zero opcode,
-	 * BPF_JA, can't get here.
-	 */
-	if (opcode)
-		reg_set_min_max(true_reg, false_reg, uval, uval32, opcode, is_jmp32);
-}
-
 /* Regs are known to be equal, so intersect their min/max/var_off */
 static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
 				  struct bpf_reg_state *dst_reg)
@@ -14902,53 +14904,32 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		return -EFAULT;
 	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
 
-	/* detect if we are comparing against a constant value so we can adjust
-	 * our min/max values for our dst register.
-	 * this is only legit if both are scalars (or pointers to the same
-	 * object, I suppose, see the PTR_MAYBE_NULL related if block below),
-	 * because otherwise the different base pointers mean the offsets aren't
-	 * comparable.
-	 */
 	if (BPF_SRC(insn->code) == BPF_X) {
 		struct bpf_reg_state *src_reg = &regs[insn->src_reg];
 
+		reg_set_min_max(&other_branch_regs[insn->dst_reg],
+				&other_branch_regs[insn->src_reg],
+				dst_reg, src_reg, opcode, is_jmp32);
+
 		if (dst_reg->type == SCALAR_VALUE &&
-		    src_reg->type == SCALAR_VALUE) {
-			if (tnum_is_const(src_reg->var_off) ||
-			    (is_jmp32 &&
-			     tnum_is_const(tnum_subreg(src_reg->var_off))))
-				reg_set_min_max(&other_branch_regs[insn->dst_reg],
-						dst_reg,
-						src_reg->var_off.value,
-						tnum_subreg(src_reg->var_off).value,
-						opcode, is_jmp32);
-			else if (tnum_is_const(dst_reg->var_off) ||
-				 (is_jmp32 &&
-				  tnum_is_const(tnum_subreg(dst_reg->var_off))))
-				reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
-						    src_reg,
-						    dst_reg->var_off.value,
-						    tnum_subreg(dst_reg->var_off).value,
-						    opcode, is_jmp32);
-			else if (!is_jmp32 &&
-				 (opcode == BPF_JEQ || opcode == BPF_JNE))
-				/* Comparing for equality, we can combine knowledge */
-				reg_combine_min_max(&other_branch_regs[insn->src_reg],
-						    &other_branch_regs[insn->dst_reg],
-						    src_reg, dst_reg, opcode);
-			if (src_reg->id &&
-			    !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) {
-				find_equal_scalars(this_branch, src_reg);
-				find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]);
-			}
-
+		    src_reg->type == SCALAR_VALUE &&
+		    !is_jmp32 && (opcode == BPF_JEQ || opcode == BPF_JNE)) {
+			/* Comparing for equality, we can combine knowledge */
+			reg_combine_min_max(&other_branch_regs[insn->src_reg],
+					    &other_branch_regs[insn->dst_reg],
+					    src_reg, dst_reg, opcode);
 		}
-	} else if (dst_reg->type == SCALAR_VALUE) {
+	} else /* BPF_SRC(insn->code) == BPF_K */ {
 		reg_set_min_max(&other_branch_regs[insn->dst_reg],
-				dst_reg, insn->imm, (u32)insn->imm,
-				opcode, is_jmp32);
+				src_reg /* fake one */,
+				dst_reg, src_reg /* same fake one */,
+				opcode, is_jmp32);
 	}
 
+	if (BPF_SRC(insn->code) == BPF_X &&
+	    src_reg->type == SCALAR_VALUE && src_reg->id &&
+	    !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) {
+		find_equal_scalars(this_branch, src_reg);
+		find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]);
+	}
 	if (dst_reg->type == SCALAR_VALUE && dst_reg->id &&
 	    !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) {
 		find_equal_scalars(this_branch, dst_reg);