Merge branch 'bpf: Build with -Wcast-function-type'
Kees Cook says:

====================
Hi,

In order to keep ahead of cases in the kernel where Control Flow
Integrity (CFI) may trip over function call casts, enabling
-Wcast-function-type is helpful. To that end, replace BPF_CAST_CALL(),
which triggers warnings under this option and is now one of the last
places in the kernel in need of fixing.

Thanks,

-Kees

v2:
 - rebase to bpf-next
 - add acks
v1: https://lore.kernel.org/lkml/20210927182700.2980499-1-keescook@chromium.org
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit 72e1781a5d
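Before the per-file diff, a minimal stand-alone sketch of the pattern this series removes may help; everything below is hypothetical user-space code, not part of the commit. Casting a function pointer to a catch-all prototype hides the real type from both -Wcast-function-type and CFI, while a shared callback typedef keeps every indirect call type-correct with no cast at all.

    /* Minimal sketch, compiles stand-alone; all names here are
     * hypothetical stand-ins, not kernel code.
     */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u64;

    /* Old pattern: force any function pointer into a five-u64 prototype.
     * Applying this to a function with a different prototype, e.g.
     *   BPF_CAST_CALL(some_lookup_fn)(a, b, c, d, e);
     * draws "cast between incompatible function types" under
     * -Wcast-function-type, and a CFI build may abort the call at runtime.
     */
    #define BPF_CAST_CALL(x) ((u64 (*)(u64, u64, u64, u64, u64))(x))

    /* New pattern: one shared callback type, no cast at the call site. */
    typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);

    static u64 add_first_two(u64 a, u64 b, u64 c, u64 d, u64 e)
    {
            (void)c; (void)d; (void)e;
            return a + b;
    }

    int main(void)
    {
            bpf_callback_t cb = add_first_two;      /* type-correct, no cast */

            printf("%llu\n", (unsigned long long)cb(2, 3, 0, 0, 0));
            return 0;
    }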
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
@@ -48,6 +48,7 @@ extern struct idr btf_idr;
 extern spinlock_t btf_idr_lock;
 extern struct kobject *btf_kobj;
 
+typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
 typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
					struct bpf_iter_aux_info *aux);
 typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
@@ -142,7 +143,8 @@ struct bpf_map_ops {
	int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
					      struct bpf_func_state *caller,
					      struct bpf_func_state *callee);
-	int (*map_for_each_callback)(struct bpf_map *map, void *callback_fn,
+	int (*map_for_each_callback)(struct bpf_map *map,
+				     bpf_callback_t callback_fn,
				     void *callback_ctx, u64 flags);
 
	/* BTF name and id of struct allocated by map_alloc */
diff --git a/include/linux/filter.h b/include/linux/filter.h
@@ -360,10 +360,9 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
		.off   = 0,					\
		.imm   = TGT })
 
-/* Function call */
+/* Convert function address to BPF immediate */
 
-#define BPF_CAST_CALL(x)					\
-		((u64 (*)(u64, u64, u64, u64, u64))(x))
+#define BPF_CALL_IMM(x)	((void *)(x) - (void *)__bpf_call_base)
 
 #define BPF_EMIT_CALL(FUNC)					\
	((struct bpf_insn) {					\
@@ -371,7 +370,7 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
-		.imm   = ((FUNC) - __bpf_call_base) })
+		.imm   = BPF_CALL_IMM(FUNC) })
 
 /* Raw code statement block */
 
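One detail worth noting at this point in the diff: a BPF call instruction stores its target as a 32-bit immediate relative to __bpf_call_base, so BPF_CALL_IMM() is only the encoding half of a round trip; the address is later recovered as __bpf_call_base + insn->imm, which is exactly what the lib/test_bpf.c hunk at the end of this diff checks. A minimal stand-alone sketch of that idea, with hypothetical stand-in functions (the void * arithmetic, as in the kernel macro itself, relies on the GCC extension that treats void * like char *):

    /* Stand-alone sketch of the relative-immediate encoding; base_fn and
     * helper_fn are hypothetical stand-ins for __bpf_call_base and a
     * helper. Build with gcc: void * arithmetic is a GNU extension.
     */
    #include <assert.h>
    #include <stdint.h>

    static uint64_t base_fn(uint64_t a, uint64_t b, uint64_t c, uint64_t d, uint64_t e)
    {
            (void)a; (void)b; (void)c; (void)d; (void)e;
            return 0;       /* placeholder for __bpf_call_base */
    }

    static uint64_t helper_fn(uint64_t a, uint64_t b, uint64_t c, uint64_t d, uint64_t e)
    {
            (void)c; (void)d; (void)e;
            return a + b;
    }

    #define CALL_IMM(x) ((void *)(x) - (void *)base_fn)

    int main(void)
    {
            int32_t imm = (int32_t)(intptr_t)CALL_IMM(helper_fn);  /* encode */
            void *addr = (void *)base_fn + imm;                    /* decode */

            /* Round trip holds whenever the offset fits in a signed 32-bit
             * immediate; the kernel's test code guards the same condition.
             */
            assert(addr == (void *)helper_fn);
            return 0;
    }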
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
@@ -645,7 +645,7 @@ static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_priv_size		= sizeof(struct bpf_iter_seq_array_map_info),
 };
 
-static int bpf_for_each_array_elem(struct bpf_map *map, void *callback_fn,
+static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
				   void *callback_ctx, u64 flags)
 {
	u32 i, key, num_elems = 0;
@@ -668,9 +668,8 @@ static int bpf_for_each_array_elem(struct bpf_map *map, void *callback_fn,
		val = array->value + array->elem_size * i;
		num_elems++;
		key = i;
-		ret = BPF_CAST_CALL(callback_fn)((u64)(long)map,
-					(u64)(long)&key, (u64)(long)val,
-					(u64)(long)callback_ctx, 0);
+		ret = callback_fn((u64)(long)map, (u64)(long)&key,
+				  (u64)(long)val, (u64)(long)callback_ctx, 0);
		/* return value: 0 - continue, 1 - stop and return */
		if (ret)
			break;
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
@@ -668,7 +668,7 @@ static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 
	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
-	*insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
+	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
@@ -709,7 +709,7 @@ static int htab_lru_map_gen_lookup(struct bpf_map *map,
 
	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
-	*insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
+	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
	*insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
			      offsetof(struct htab_elem, lru_node) +
@@ -2049,7 +2049,7 @@ static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_priv_size		= sizeof(struct bpf_iter_seq_hash_map_info),
 };
 
-static int bpf_for_each_hash_elem(struct bpf_map *map, void *callback_fn,
+static int bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn,
				  void *callback_ctx, u64 flags)
 {
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
@@ -2089,9 +2089,8 @@ static int bpf_for_each_hash_elem(struct bpf_map *map, void *callback_fn,
				val = elem->key + roundup_key_size;
			}
			num_elems++;
-			ret = BPF_CAST_CALL(callback_fn)((u64)(long)map,
-					(u64)(long)key, (u64)(long)val,
-					(u64)(long)callback_ctx, 0);
+			ret = callback_fn((u64)(long)map, (u64)(long)key,
+					  (u64)(long)val, (u64)(long)callback_ctx, 0);
			/* return value: 0 - continue, 1 - stop and return */
			if (ret) {
				rcu_read_unlock();
@@ -2397,7 +2396,7 @@ static int htab_of_map_gen_lookup(struct bpf_map *map,
 
	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
-	*insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
+	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
@@ -1056,7 +1056,7 @@ static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
	struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
	struct bpf_map *map = t->map;
	void *value = t->value;
-	void *callback_fn;
+	bpf_callback_t callback_fn;
	void *key;
	u32 idx;
 
@@ -1081,8 +1081,7 @@ static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
		key = value - round_up(map->key_size, 8);
	}
 
-	BPF_CAST_CALL(callback_fn)((u64)(long)map, (u64)(long)key,
-				   (u64)(long)value, 0, 0);
+	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
	/* The verifier checked that return value is zero. */
 
	this_cpu_write(hrtimer_running, NULL);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
@@ -1744,7 +1744,7 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id)
 
	desc = &tab->descs[tab->nr_descs++];
	desc->func_id = func_id;
-	desc->imm = BPF_CAST_CALL(addr) - __bpf_call_base;
+	desc->imm = BPF_CALL_IMM(addr);
	err = btf_distill_func_proto(&env->log, btf_vmlinux,
				     func_proto, func_name,
				     &desc->func_model);
@@ -12514,8 +12514,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
			if (!bpf_pseudo_call(insn))
				continue;
			subprog = insn->off;
-			insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) -
-				    __bpf_call_base;
+			insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func);
		}
 
		/* we use the aux data to keep a list of the start addresses
@@ -12995,32 +12994,25 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 patch_map_ops_generic:
		switch (insn->imm) {
		case BPF_FUNC_map_lookup_elem:
-			insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
-				    __bpf_call_base;
+			insn->imm = BPF_CALL_IMM(ops->map_lookup_elem);
			continue;
		case BPF_FUNC_map_update_elem:
-			insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
-				    __bpf_call_base;
+			insn->imm = BPF_CALL_IMM(ops->map_update_elem);
			continue;
		case BPF_FUNC_map_delete_elem:
-			insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
-				    __bpf_call_base;
+			insn->imm = BPF_CALL_IMM(ops->map_delete_elem);
			continue;
		case BPF_FUNC_map_push_elem:
-			insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
-				    __bpf_call_base;
+			insn->imm = BPF_CALL_IMM(ops->map_push_elem);
			continue;
		case BPF_FUNC_map_pop_elem:
-			insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
-				    __bpf_call_base;
+			insn->imm = BPF_CALL_IMM(ops->map_pop_elem);
			continue;
		case BPF_FUNC_map_peek_elem:
-			insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
-				    __bpf_call_base;
+			insn->imm = BPF_CALL_IMM(ops->map_peek_elem);
			continue;
		case BPF_FUNC_redirect_map:
-			insn->imm = BPF_CAST_CALL(ops->map_redirect) -
-				    __bpf_call_base;
+			insn->imm = BPF_CALL_IMM(ops->map_redirect);
			continue;
		}
 
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
@@ -12439,7 +12439,7 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
			err = -EFAULT;
			goto out_err;
		}
-		*insn = BPF_EMIT_CALL(BPF_CAST_CALL(addr));
+		*insn = BPF_EMIT_CALL(addr);
		if ((long)__bpf_call_base + insn->imm != addr)
			*insn = BPF_JMP_A(0); /* Skip: NOP */
		break;