Mirror of https://github.com/torvalds/linux.git (synced 2024-11-27 22:51:35 +00:00)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Alexei Starovoitov says:

====================
pull-request: bpf 2019-04-25

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) the bpf verifier fix to properly mark registers in all stack frames, from Paul.

2) preempt_enable_no_resched -> preempt_enable fix, from Peter.

3) other misc fixes.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit ad759c9069
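For context on fix 1): before this change, a NULL check performed inside one BPF function only updated the pointer's state in the current verifier stack frame, so copies of the same pointer (same register id) held in caller frames kept their maybe-NULL type and safe programs were rejected. A minimal sketch of the affected pattern as a BPF C program; the map definition, function names, and section name are illustrative, not taken from this commit:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#ifndef __noinline
#define __noinline __attribute__((noinline))
#endif

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} my_map SEC(".maps");

/* The NULL check runs in a separate BPF function, i.e. in a different
 * verifier stack frame than the dereference in the caller. */
static __noinline int is_null(__u64 *p)
{
	return !p;
}

SEC("xdp")
int prog(struct xdp_md *ctx)
{
	__u32 key = 0;
	__u64 *val = bpf_map_lookup_elem(&my_map, &key);

	if (is_null(val))	/* check happens in the subprog's frame */
		return XDP_DROP;
	/* The caller's copy of val must be marked non-NULL as well,
	 * or this load fails verification. */
	return *val ? XDP_PASS : XDP_DROP;
}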
arch/mips/net/ebpf_jit.c:

@@ -186,8 +186,9 @@ enum which_ebpf_reg {
  * separate frame pointer, so BPF_REG_10 relative accesses are
  * adjusted to be $sp relative.
  */
-int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn,
-		     enum which_ebpf_reg w)
+static int ebpf_to_mips_reg(struct jit_ctx *ctx,
+			    const struct bpf_insn *insn,
+			    enum which_ebpf_reg w)
 {
 	int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
 		insn->src_reg : insn->dst_reg;
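The hunk above only makes ebpf_to_mips_reg() static, reflowing its parameter list to fit; the function has no users outside this file, so this appears to be a fix for a missing-prototype style build warning rather than a behavioral change.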
include/linux/bpf.h:

@@ -510,7 +510,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
 		}			\
 _out:					\
 		rcu_read_unlock();	\
-		preempt_enable_no_resched();	\
+		preempt_enable();	\
 		_ret;			\
 	 })
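Fix 2) is a one-liner in the macro that runs a BPF program array with preemption disabled: re-enabling preemption must use the variant that acts on a reschedule request that arrived while preemption was off. Roughly, the two primitives compare as follows (paraphrased from memory of include/linux/preempt.h for a preemptible kernel; a sketch, not a verbatim quote):

#define preempt_enable() do {				\
	barrier();					\
	if (unlikely(preempt_count_dec_and_test()))	\
		__preempt_schedule();	/* honor a pending need_resched */ \
} while (0)

#define preempt_enable_no_resched() do {		\
	barrier();					\
	preempt_count_dec();	/* drops the count, never reschedules */ \
} while (0)

The _no_resched variant is essentially scheduler-internal; using it here could postpone a pending reschedule until the next scheduling point.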
kernel/bpf/verifier.c:

@@ -4138,15 +4138,35 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 	return 0;
 }
 
+static void __find_good_pkt_pointers(struct bpf_func_state *state,
+				     struct bpf_reg_state *dst_reg,
+				     enum bpf_reg_type type, u16 new_range)
+{
+	struct bpf_reg_state *reg;
+	int i;
+
+	for (i = 0; i < MAX_BPF_REG; i++) {
+		reg = &state->regs[i];
+		if (reg->type == type && reg->id == dst_reg->id)
+			/* keep the maximum range already checked */
+			reg->range = max(reg->range, new_range);
+	}
+
+	bpf_for_each_spilled_reg(i, state, reg) {
+		if (!reg)
+			continue;
+		if (reg->type == type && reg->id == dst_reg->id)
+			reg->range = max(reg->range, new_range);
+	}
+}
+
 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
 				   struct bpf_reg_state *dst_reg,
 				   enum bpf_reg_type type,
 				   bool range_right_open)
 {
-	struct bpf_func_state *state = vstate->frame[vstate->curframe];
-	struct bpf_reg_state *regs = state->regs, *reg;
 	u16 new_range;
-	int i, j;
+	int i;
 
 	if (dst_reg->off < 0 ||
 	    (dst_reg->off == 0 && range_right_open))
@@ -4211,20 +4231,9 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
 	 * the range won't allow anything.
 	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
 	 */
-	for (i = 0; i < MAX_BPF_REG; i++)
-		if (regs[i].type == type && regs[i].id == dst_reg->id)
-			/* keep the maximum range already checked */
-			regs[i].range = max(regs[i].range, new_range);
-
-	for (j = 0; j <= vstate->curframe; j++) {
-		state = vstate->frame[j];
-		bpf_for_each_spilled_reg(i, state, reg) {
-			if (!reg)
-				continue;
-			if (reg->type == type && reg->id == dst_reg->id)
-				reg->range = max(reg->range, new_range);
-		}
-	}
+	for (i = 0; i <= vstate->curframe; i++)
+		__find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
+					 new_range);
 }
 
 /* compute branch direction of the expression "if (reg opcode val) goto target;"
@@ -4698,6 +4707,22 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
 	}
 }
 
+static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id,
+				    bool is_null)
+{
+	struct bpf_reg_state *reg;
+	int i;
+
+	for (i = 0; i < MAX_BPF_REG; i++)
+		mark_ptr_or_null_reg(state, &state->regs[i], id, is_null);
+
+	bpf_for_each_spilled_reg(i, state, reg) {
+		if (!reg)
+			continue;
+		mark_ptr_or_null_reg(state, reg, id, is_null);
+	}
+}
+
 /* The logic is similar to find_good_pkt_pointers(), both could eventually
  * be folded together at some point.
  */
@@ -4705,10 +4730,10 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
 				  bool is_null)
 {
 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
-	struct bpf_reg_state *reg, *regs = state->regs;
+	struct bpf_reg_state *regs = state->regs;
 	u32 ref_obj_id = regs[regno].ref_obj_id;
 	u32 id = regs[regno].id;
-	int i, j;
+	int i;
 
 	if (ref_obj_id && ref_obj_id == id && is_null)
 		/* regs[regno] is in the " == NULL" branch.
@@ -4717,17 +4742,8 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
 	 */
 		WARN_ON_ONCE(release_reference_state(state, id));
 
-	for (i = 0; i < MAX_BPF_REG; i++)
-		mark_ptr_or_null_reg(state, &regs[i], id, is_null);
-
-	for (j = 0; j <= vstate->curframe; j++) {
-		state = vstate->frame[j];
-		bpf_for_each_spilled_reg(i, state, reg) {
-			if (!reg)
-				continue;
-			mark_ptr_or_null_reg(state, reg, id, is_null);
-		}
-	}
+	for (i = 0; i <= vstate->curframe; i++)
+		__mark_ptr_or_null_regs(vstate->frame[i], id, is_null);
 }
 
 static bool try_match_pkt_pointers(const struct bpf_insn *insn,
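Taken together, the verifier hunks follow one pattern: the loop that marks all MAX_BPF_REG registers plus any spilled registers moves into a double-underscore helper operating on a single bpf_func_state, and each caller now applies that helper to every frame from 0 through vstate->curframe. Note the old code already walked all frames for *spilled* registers; it was the ordinary registers of parent frames that were missed. Schematically (with __helper standing in for either new function):

	for (i = 0; i <= vstate->curframe; i++)
		__helper(vstate->frame[i], ...);	/* was: current frame's regs only */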
tools/bpf/bpftool/map.c:

@@ -1151,6 +1151,9 @@ static int do_create(int argc, char **argv)
 				return -1;
 			}
 			NEXT_ARG();
+		} else {
+			p_err("unknown arg %s", *argv);
+			return -1;
 		}
 	}
 
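Context for the bpftool hunk: in do_create()'s keyword-parsing loop, a token matching none of the known keywords was previously never consumed, so the loop could spin forever on the same argument; the new else branch reports it and bails out. An illustrative invocation (path and exact output are assumed, not taken from the commit):

$ bpftool map create /sys/fs/bpf/m type hash key 4 value 8 entries 64 name m bogus
Error: unknown arg bogus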
tools/lib/bpf/.gitignore (vendored):

@@ -1,3 +1,4 @@
 libbpf_version.h
 FEATURE-DUMP.libbpf
 test_libbpf
+libbpf.so.*
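The new pattern covers the versioned shared objects the libbpf build produces (libbpf.so.0, libbpf.so.0.0.x, and so on); the kernel's top-level .gitignore already covers plain *.so, but not these versioned names, so they showed up as untracked files after a build.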
tools/testing/selftests/bpf/verifier/calls.c:

@@ -374,6 +374,31 @@
 	.prog_type = BPF_PROG_TYPE_XDP,
 	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+	"calls: ptr null check in subprog",
+	.insns = {
+	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+	BPF_LD_MAP_FD(BPF_REG_1, 0),
+	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
+	BPF_EXIT_INSN(),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+	BPF_MOV64_IMM(BPF_REG_0, 1),
+	BPF_EXIT_INSN(),
+	},
+	.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+	.fixup_map_hash_48b = { 3 },
+	.result_unpriv = REJECT,
+	.result = ACCEPT,
+	.retval = 0,
+},
 {
 	"calls: two calls with args",
 	.insns = {
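A brief walk-through of the new "calls: ptr null check in subprog" test (annotations mine, keyed to the listing above):

	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),	/* R0 = map value or NULL */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),		/* R1: argument for the subprog, same id as R0 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),		/* R6: caller's copy, same id, survives the call */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),	/* bpf2bpf call into frame 1 */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),		/* subprog reported NULL: skip the load */
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),	/* load through R6 in frame 0 */

The NULL check itself, BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), executes in the subprog, i.e. in frame 1. It must also mark the caller's R1/R6 in frame 0, which carry the same register id; before the fix only frame 1 was marked, so the load through R6 was rejected as a possible NULL dereference and this otherwise-safe program failed to verify. The test's .result = ACCEPT exercises the multi-frame marking directly.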
tools/testing/selftests/bpf/verifier/direct_packet_access.c:

@@ -631,3 +631,25 @@
 	.errstr = "invalid access to packet",
 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 },
+{
+	"direct packet access: test29 (reg > pkt_end in subprog)",
+	.insns = {
+	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+		    offsetof(struct __sk_buff, data)),
+	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+		    offsetof(struct __sk_buff, data_end)),
+	BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 1),
+	BPF_MOV64_IMM(BPF_REG_0, 1),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
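test29 exercises the same frame propagation for packet pointers: the caller (frame 0) loads data into R6 and data_end into R2, computes R3 = R6 + 8 (same packet-pointer id as R6), and calls a subprog that performs the bounds check BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 1) in frame 1. On the in-bounds path, find_good_pkt_pointers() must extend the range of every register sharing R3's id, including frame 0's R6; otherwise the caller's one-byte load through R6 would be rejected as an invalid packet access.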