mirror of https://github.com/torvalds/linux.git
bpf: Add BPF_FETCH field / create atomic_fetch_add instruction
The BPF_FETCH field can be set in bpf_insn.imm, for BPF_ATOMIC instructions,
in order to have the previous value of the atomically-modified memory
location loaded into the src register after an atomic op is carried out.

Suggested-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Brendan Jackman <jackmanb@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/20210114181751.768687-7-jackmanb@google.com
parent c5bcb5eb4d
commit 5ca419f286
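In rough pseudo-C, the effect of the new form described above is the following (a sketch of the semantics only, not code from the patch; the variable names are illustrative and the exact memory-ordering guarantees are not addressed here):

	/* BPF_ATOMIC, imm = BPF_ADD | BPF_FETCH, operating on a u64 slot */
	u64 *addr = (u64 *)(dst_reg + off);	/* dst_reg points at memory */
	u64 old = __atomic_fetch_add(addr, src_reg, __ATOMIC_SEQ_CST);
	src_reg = old;				/* plain BPF_ADD discards this */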
@@ -811,6 +811,10 @@ static int emit_atomic(u8 **pprog, u8 atomic_op,
		/* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
		EMIT1(simple_alu_opcodes[atomic_op]);
		break;
	case BPF_ADD | BPF_FETCH:
		/* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
		EMIT2(0x0F, 0xC1);
		break;
	default:
		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
		return -EFAULT;
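This first hunk looks like the x86-64 JIT (arch/x86/net/bpf_jit_comp.c). Opcode 0F C1 is the x86 XADD instruction; combined with the LOCK prefix used for these ops (per the comment above), XADD atomically adds the source register into memory and writes the old memory value back into that register, which is exactly the fetch_add semantic. Roughly, in C (illustration only, not kernel code):

	/* What "lock xadd [dst_reg + off], src_reg" achieves, as a C sketch. */
	static inline unsigned long fetch_add(unsigned long *addr, unsigned long val)
	{
		return __sync_fetch_and_add(addr, val);	/* returns the previous value */
	}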
@@ -264,6 +264,7 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
 * Atomic operations:
 *
 * BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
 * BPF_ADD | BPF_FETCH      src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
 */

#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF)
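This hunk appears to be include/linux/filter.h, documenting the new form next to the existing BPF_ADD entry, just above the BPF_ATOMIC_OP() insn-construction macro. A hedged usage sketch, assuming the macro's argument order shown above (size, op, dst, src, off):

	/* Build: r1 = atomic64_fetch_add((u64 *)(r2 + 0), r1). Illustrative only. */
	struct bpf_insn insn =
		BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0);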
@@ -44,6 +44,9 @@
#define BPF_CALL	0x80	/* function call */
#define BPF_EXIT	0x90	/* function return */

/* atomic op type fields (stored in immediate) */
#define BPF_FETCH	0x01	/* fetch previous value into src reg */

/* Register numbers */
enum {
	BPF_REG_0 = 0,
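This looks like the UAPI header (include/uapi/linux/bpf.h); the same change appears again at the end of the diff for the tools/ copy. The immediate of a BPF_ATOMIC instruction carries the arithmetic op, with BPF_FETCH as a modifier bit, so for this patch the two valid encodings are (values per the defines above, BPF_ADD being 0x00):

	insn.imm = BPF_ADD;			/* 0x00: add, old value discarded     */
	insn.imm = BPF_ADD | BPF_FETCH;		/* 0x01: add, old value into src_reg  */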
@@ -1624,16 +1624,29 @@ out:
			/* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
			atomic_add((u32) SRC, (atomic_t *)(unsigned long)
				   (DST + insn->off));
			break;
		case BPF_ADD | BPF_FETCH:
			SRC = (u32) atomic_fetch_add(
				(u32) SRC,
				(atomic_t *)(unsigned long) (DST + insn->off));
			break;
		default:
			goto default_label;
		}
		CONT;

	STX_ATOMIC_DW:
		switch (IMM) {
		case BPF_ADD:
			/* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
			atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
				     (DST + insn->off));
			break;
		case BPF_ADD | BPF_FETCH:
			SRC = (u64) atomic64_fetch_add(
				(u64) SRC,
				(atomic64_t *)(unsigned long) (DST + insn->off));
			break;
		default:
			goto default_label;
		}
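This hunk appears to be the interpreter (kernel/bpf/core.c): the fetch variants simply reuse the kernel's atomic_fetch_add()/atomic64_fetch_add() and store the returned old value back into the source register. From BPF C, the user-visible effect is roughly the following, assuming a clang/LLVM version and -mcpu level new enough to emit the fetch form of __sync_fetch_and_add (illustrative fragment, not from the patch):

	#include <linux/types.h>

	static __u64 counter;

	static inline __u64 bump_and_fetch_old(void)
	{
		/* With BPF_FETCH support the previous value is actually returned
		 * to the program rather than being discarded. */
		return __sync_fetch_and_add(&counter, 1);
	}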
@@ -160,6 +160,13 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
			bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
			insn->dst_reg, insn->off,
			insn->src_reg);
	} else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
		   insn->imm == (BPF_ADD | BPF_FETCH)) {
		verbose(cbs->private_data, "(%02x) r%d = atomic%s_fetch_add((%s *)(r%d %+d), r%d)\n",
			insn->code, insn->src_reg,
			BPF_SIZE(insn->code) == BPF_DW ? "64" : "",
			bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
			insn->dst_reg, insn->off, insn->src_reg);
	} else {
		verbose(cbs->private_data, "BUG_%02x\n", insn->code);
	}
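Presumably kernel/bpf/disasm.c: the verifier log learns to print the new instruction. With the format string above, a 64-bit fetch-add of r1 into *(u64 *)(r2 + 0) would come out roughly as follows (opcode byte 0xdb = BPF_STX | BPF_DW | BPF_ATOMIC; illustrative, not captured from a real log):

	(db) r1 = atomic64_fetch_add((u64 *)(r2 +0), r1)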
@@ -3608,7 +3608,11 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
 {
 	int err;

-	if (insn->imm != BPF_ADD) {
+	switch (insn->imm) {
+	case BPF_ADD:
+	case BPF_ADD | BPF_FETCH:
+		break;
+	default:
 		verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm);
 		return -EINVAL;
 	}
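With this whitelist in kernel/bpf/verifier.c, any other value in the immediate is rejected before the memory checks run. For example, an op this patch does not yet support, such as BPF_SUB (0x10), should fail the load with a log line like (illustrative):

	BPF_ATOMIC uses invalid atomic opcode 10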
@@ -3650,8 +3654,20 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
 		return err;

 	/* check whether we can write into the same memory */
-	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
-				BPF_SIZE(insn->code), BPF_WRITE, -1, true);
+	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
+			       BPF_SIZE(insn->code), BPF_WRITE, -1, true);
+	if (err)
+		return err;
+
+	if (!(insn->imm & BPF_FETCH))
+		return 0;
+
+	/* check and record load of old value into src reg */
+	err = check_reg_arg(env, insn->src_reg, DST_OP);
+	if (err)
+		return err;
+
+	return 0;
 }

 static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno,
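The extra check_reg_arg(..., DST_OP) call reflects that with BPF_FETCH the source register is also written: after the instruction it holds the old memory value as a scalar, and whatever it held before is gone. A hedged sketch of a sequence the verifier should now accept (register choices and the stack slot are illustrative):

	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 3),		/* *(u64 *)(fp - 8) = 3            */
	BPF_MOV64_IMM(BPF_REG_1, 1),			/* value to add                    */
	BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH,
		      BPF_REG_10, BPF_REG_1, -8),	/* r1 = old value, i.e. 3          */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),		/* return the fetched value        */
	BPF_EXIT_INSN(),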
@@ -9528,12 +9544,6 @@ static int do_check(struct bpf_verifier_env *env)
 		} else if (class == BPF_STX) {
 			enum bpf_reg_type *prev_dst_type, dst_reg_type;

-			if (((BPF_MODE(insn->code) != BPF_MEM &&
-			      BPF_MODE(insn->code) != BPF_ATOMIC) || insn->imm != 0)) {
-				verbose(env, "BPF_STX uses reserved fields\n");
-				return -EINVAL;
-			}
-
 			if (BPF_MODE(insn->code) == BPF_ATOMIC) {
 				err = check_atomic(env, env->insn_idx, insn);
 				if (err)
@@ -9542,6 +9552,11 @@ static int do_check(struct bpf_verifier_env *env)
 				continue;
 			}

+			if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) {
+				verbose(env, "BPF_STX uses reserved fields\n");
+				return -EINVAL;
+			}
+
 			/* check src1 operand */
 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
 			if (err)
@@ -173,6 +173,7 @@
 * Atomic operations:
 *
 * BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
 * BPF_ADD | BPF_FETCH      src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
 */

#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF)
@@ -44,6 +44,9 @@
#define BPF_CALL	0x80	/* function call */
#define BPF_EXIT	0x90	/* function return */

/* atomic op type fields (stored in immediate) */
#define BPF_FETCH	0x01	/* fetch previous value into src reg */

/* Register numbers */
enum {
	BPF_REG_0 = 0,