Alexei Starovoitov says:

====================
pull-request: bpf 2021-02-26

1) Fix for bpf atomic insns with src_reg=r0, from Brendan.

2) Fix use after free due to bpf_prog_clone, from Cong.

3) Drop imprecise verifier log message, from Dmitrii.

4) Remove incorrect blank line in bpf helper description, from Hangbin.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  selftests/bpf: No need to drop the packet when there is no geneve opt
  bpf: Remove blank line in bpf helper description comment
  tools/resolve_btfids: Fix build error with older host toolchains
  selftests/bpf: Fix a compiler warning in global func test
  bpf: Drop imprecise log message
  bpf: Clear percpu pointers in bpf_prog_clone_free()
  bpf: Fix a warning message in mark_ptr_not_null_reg()
  bpf, x86: Fix BPF_FETCH atomic and/or/xor with r0 as src
====================

Link: https://lore.kernel.org/r/20210226193737.57004-1-alexei.starovoitov@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 9e8e714f2d (Jakub Kicinski, 2021-02-26 13:16:29 -08:00)
10 changed files with 41 additions and 13 deletions

@@ -1349,6 +1349,7 @@ st: if (is_imm8(insn->off))
 			   insn->imm == (BPF_XOR | BPF_FETCH)) {
 			u8 *branch_target;
 			bool is64 = BPF_SIZE(insn->code) == BPF_DW;
+			u32 real_src_reg = src_reg;
 
 			/*
 			 * Can't be implemented with a single x86 insn.
@@ -1357,6 +1358,9 @@ st: if (is_imm8(insn->off))
 			/* Will need RAX as a CMPXCHG operand so save R0 */
 			emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
+			if (src_reg == BPF_REG_0)
+				real_src_reg = BPF_REG_AX;
 
 			branch_target = prog;
 			/* Load old value */
 			emit_ldx(&prog, BPF_SIZE(insn->code),
@@ -1366,9 +1370,9 @@ st: if (is_imm8(insn->off))
 			 * put the result in the AUX_REG.
 			 */
 			emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
-			maybe_emit_mod(&prog, AUX_REG, src_reg, is64);
+			maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
 			EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
-			      add_2reg(0xC0, AUX_REG, src_reg));
+			      add_2reg(0xC0, AUX_REG, real_src_reg));
 
 			/* Attempt to swap in new value */
 			err = emit_atomic(&prog, BPF_CMPXCHG,
 					  dst_reg, AUX_REG, insn->off,
@@ -1381,7 +1385,7 @@ st: if (is_imm8(insn->off))
 			 */
 			EMIT2(X86_JNE, -(prog - branch_target) - 2);
 			/* Return the pre-modification value */
-			emit_mov_reg(&prog, is64, src_reg, BPF_REG_0);
+			emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
 			/* Restore R0 after clobbering RAX */
 			emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
 			break;
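
For context, the JIT lowers these BPF_FETCH and/or/xor variants to a compare-exchange retry loop. The userspace C sketch below (illustrative only; the function name and the GCC __sync builtin are my choices, not kernel code) shows the shape of what gets emitted for old = atomic_fetch_and(&val, src), and why r0 is clobbered: x86 CMPXCHG implicitly uses RAX, so the JIT saves r0 into BPF_REG_AX and, when the source register is r0 itself, must read the operand from that saved copy (real_src_reg).

#include <stdint.h>

/* Minimal sketch of the CMPXCHG loop emitted for BPF_AND | BPF_FETCH. */
static uint64_t sketch_atomic_fetch_and(uint64_t *addr, uint64_t src)
{
	uint64_t old, new_val;

	do {
		old = *addr;             /* load old value                   */
		new_val = old & src;     /* perform the op locally           */
		/* attempt to swap in new value; retry if we lost the race   */
	} while (!__sync_bool_compare_and_swap(addr, old, new_val));

	return old;                      /* return the pre-modification value */
}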

@@ -3850,7 +3850,6 @@ union bpf_attr {
  *
  * long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags)
  *	Description
- *
  *		Check ctx packet size against exceeding MTU of net device (based
  *		on *ifindex*). This helper will likely be used in combination
  *		with helpers that adjust/change the packet size.

@@ -4321,8 +4321,6 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, struct btf *btf,
 		 * is not supported yet.
 		 * BPF_PROG_TYPE_RAW_TRACEPOINT is fine.
 		 */
-		if (log->level & BPF_LOG_LEVEL)
-			bpf_log(log, "arg#%d type is not a struct\n", arg);
 		return NULL;
 	}
 	tname = btf_name_by_offset(btf, t->name_off);

@@ -1118,6 +1118,8 @@ static void bpf_prog_clone_free(struct bpf_prog *fp)
 	 * clone is guaranteed to not be locked.
 	 */
 	fp->aux = NULL;
+	fp->stats = NULL;
+	fp->active = NULL;
 	__bpf_prog_free(fp);
 }
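
The clone created by bpf_prog_clone_create() shares the percpu stats and active pointers with the original program, so the clone's teardown has to drop those borrowed pointers before freeing. A minimal userspace sketch of the same shallow-copy hazard (types and function names here are illustrative, not the kernel's):

#include <stdlib.h>
#include <string.h>

struct prog {
	long *stats;	/* shared with the original ("percpu" in the kernel) */
	long *active;	/* shared with the original */
};

static struct prog *clone_create(const struct prog *orig)
{
	struct prog *clone = malloc(sizeof(*clone));

	if (clone)
		memcpy(clone, orig, sizeof(*clone));	/* shallow copy: pointers are shared */
	return clone;
}

static void clone_free(struct prog *clone)
{
	/* Drop the borrowed references first; only the original owns them.
	 * Freeing (or keeping) them here would leave the original pointing
	 * at freed memory - the use-after-free this pull request fixes. */
	clone->stats = NULL;
	clone->active = NULL;
	free(clone);
}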

@@ -1120,7 +1120,7 @@ static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
 		reg->type = PTR_TO_RDWR_BUF;
 		break;
 	default:
-		WARN_ON("unknown nullable register type");
+		WARN_ONCE(1, "unknown nullable register type");
 	}
 }
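
The one-line change above is easy to misread: WARN_ON() takes a condition, not a message, so passing a string literal always evaluates as true and the text is never printed. A tiny userspace illustration (not kernel code) of why the literal made a useless condition:

#include <stdio.h>

int main(void)
{
	/* A string literal decays to a non-NULL pointer, so as a condition it
	 * is always true - which is why WARN_ON("...") always fired without
	 * ever showing its text, and WARN_ONCE(1, "...") is the correct form
	 * (explicit condition plus a real format string, reported once). */
	if ("unknown nullable register type")
		printf("a string literal used as a condition is always true\n");
	return 0;
}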

@@ -260,6 +260,11 @@ static struct btf_id *add_symbol(struct rb_root *root, char *name, size_t size)
 	return btf_id__add(root, id, false);
 }
 
+/* Older libelf.h and glibc elf.h might not yet define the ELF compression types. */
+#ifndef SHF_COMPRESSED
+#define SHF_COMPRESSED (1 << 11) /* Section with compressed data. */
+#endif
+
 /*
  * The data of compressed section should be aligned to 4
  * (for 32bit) or 8 (for 64 bit) bytes. The binutils ld

@@ -3850,7 +3850,6 @@ union bpf_attr {
  *
  * long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags)
  *	Description
- *
  *		Check ctx packet size against exceeding MTU of net device (based
  *		on *ifindex*). This helper will likely be used in combination
  *		with helpers that adjust/change the packet size.

@@ -15,5 +15,5 @@ __noinline int foo(const struct S *s)
 SEC("cgroup_skb/ingress")
 int test_cls(struct __sk_buff *skb)
 {
-	return foo(skb);
+	return foo((const void *)skb);
 }

@@ -446,10 +446,8 @@ int _geneve_get_tunnel(struct __sk_buff *skb)
 	}
 
 	ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt));
-	if (ret < 0) {
-		ERROR(ret);
-		return TC_ACT_SHOT;
-	}
+	if (ret < 0)
+		gopt.opt_class = 0;
 
 	bpf_trace_printk(fmt, sizeof(fmt),
 			key.tunnel_id, key.remote_ipv4, gopt.opt_class);

@@ -75,3 +75,26 @@
 	},
 	.result = ACCEPT,
 },
+{
+	"BPF_ATOMIC_AND with fetch - r0 as source reg",
+	.insns = {
+		/* val = 0x110; */
+		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
+		/* old = atomic_fetch_and(&val, 0x011); */
+		BPF_MOV64_IMM(BPF_REG_0, 0x011),
+		BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_10, BPF_REG_0, -8),
+		/* if (old != 0x110) exit(3); */
+		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0x110, 2),
+		BPF_MOV64_IMM(BPF_REG_0, 3),
+		BPF_EXIT_INSN(),
+		/* if (val != 0x010) exit(2); */
+		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8),
+		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x010, 2),
+		BPF_MOV64_IMM(BPF_REG_1, 2),
+		BPF_EXIT_INSN(),
+		/* exit(0); */
+		BPF_MOV64_IMM(BPF_REG_0, 0),
+		BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+},