bpf: sockmap: Allow update from BPF
Allow calling bpf_map_update_elem on sockmap and sockhash from a BPF context. The synchronization required for this is a bit fiddly: we need to prevent the socket from changing its state while we add it to the sockmap, since we rely on getting a callback via sk_prot->unhash. However, we can't just lock_sock like in sock_map_sk_acquire because that might sleep. So instead we disable softirq processing and use bh_lock_sock to prevent further modification.

Yet, this is still not enough. BPF can be called in contexts where the current CPU might have locked a socket. If the BPF program can get hold of such a socket, inserting it into a sockmap would lead to a deadlock. One straightforward example is sock_ops programs, which have ctx->sk, but the same problem exists for kprobes, etc. We deal with this by allowing sockmap updates only from known safe contexts. Improper usage is rejected by the verifier.

I've audited the enabled contexts to make sure they can't run in a locked context. It's possible that CGROUP_SKB and others are safe as well, but the auditing there is much more difficult. In any case, we can extend the safe contexts when the need arises.

Signed-off-by: Lorenz Bauer <lmb@cloudflare.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20200821102948.21918-6-lmb@cloudflare.com
commit 0126240f44 (parent 912f442cfb)
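For illustration only (not part of this commit): a minimal sketch of how a program in one of the newly allowed contexts could exercise the update path, loosely modelled on the selftests added elsewhere in this series. The map names, the SCHED_CLS attach point, and the copy-one-socket logic are assumptions made for the example, not code from the patch.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical sketch: copy a socket from one sockmap into another from
 * a TC (BPF_PROG_TYPE_SCHED_CLS) program, one of the contexts that
 * may_update_sockmap() accepts. Map and section names are illustrative.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} src SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} dst SEC(".maps");

SEC("tc")
int copy_sock_map(struct __sk_buff *skb)
{
	struct bpf_sock *sk;
	__u32 key = 0;
	long err;

	/* Sockmap lookup from BPF returns a referenced socket (or NULL). */
	sk = bpf_map_lookup_elem(&src, &key);
	if (!sk)
		return TC_ACT_OK;

	/* With this change, the verifier lets SCHED_CLS (among others)
	 * call map_update_elem on a sockmap, passing the socket as value.
	 */
	err = bpf_map_update_elem(&dst, &key, sk, BPF_ANY);

	bpf_sk_release(sk);
	return err ? TC_ACT_SHOT : TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";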
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -4178,6 +4178,38 @@ err_type:
 	return -EACCES;
 }
 
+static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
+{
+	enum bpf_attach_type eatype = env->prog->expected_attach_type;
+	enum bpf_prog_type type = env->prog->type;
+
+	if (func_id != BPF_FUNC_map_update_elem)
+		return false;
+
+	/* It's not possible to get access to a locked struct sock in these
+	 * contexts, so updating is safe.
+	 */
+	switch (type) {
+	case BPF_PROG_TYPE_TRACING:
+		if (eatype == BPF_TRACE_ITER)
+			return true;
+		break;
+	case BPF_PROG_TYPE_SOCKET_FILTER:
+	case BPF_PROG_TYPE_SCHED_CLS:
+	case BPF_PROG_TYPE_SCHED_ACT:
+	case BPF_PROG_TYPE_XDP:
+	case BPF_PROG_TYPE_SK_REUSEPORT:
+	case BPF_PROG_TYPE_FLOW_DISSECTOR:
+	case BPF_PROG_TYPE_SK_LOOKUP:
+		return true;
+	default:
+		break;
+	}
+
+	verbose(env, "cannot update sockmap in this context\n");
+	return false;
+}
+
 static int check_map_func_compatibility(struct bpf_verifier_env *env,
 					struct bpf_map *map, int func_id)
 {
@@ -4249,7 +4281,8 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
 		    func_id != BPF_FUNC_map_delete_elem &&
 		    func_id != BPF_FUNC_msg_redirect_map &&
 		    func_id != BPF_FUNC_sk_select_reuseport &&
-		    func_id != BPF_FUNC_map_lookup_elem)
+		    func_id != BPF_FUNC_map_lookup_elem &&
+		    !may_update_sockmap(env, func_id))
 			goto error;
 		break;
 	case BPF_MAP_TYPE_SOCKHASH:
@@ -4258,7 +4291,8 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
 		    func_id != BPF_FUNC_map_delete_elem &&
 		    func_id != BPF_FUNC_msg_redirect_hash &&
 		    func_id != BPF_FUNC_sk_select_reuseport &&
-		    func_id != BPF_FUNC_map_lookup_elem)
+		    func_id != BPF_FUNC_map_lookup_elem &&
+		    !may_update_sockmap(env, func_id))
 			goto error;
 		break;
 	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -603,6 +603,28 @@ out:
 	return ret;
 }
 
+static int sock_map_update_elem(struct bpf_map *map, void *key,
+				void *value, u64 flags)
+{
+	struct sock *sk = (struct sock *)value;
+	int ret;
+
+	if (!sock_map_sk_is_suitable(sk))
+		return -EOPNOTSUPP;
+
+	local_bh_disable();
+	bh_lock_sock(sk);
+	if (!sock_map_sk_state_allowed(sk))
+		ret = -EOPNOTSUPP;
+	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
+		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
+	else
+		ret = sock_hash_update_common(map, key, sk, flags);
+	bh_unlock_sock(sk);
+	local_bh_enable();
+	return ret;
+}
+
 BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
 	   struct bpf_map *, map, void *, key, u64, flags)
 {
@@ -687,6 +709,7 @@ const struct bpf_map_ops sock_map_ops = {
 	.map_free		= sock_map_free,
 	.map_get_next_key	= sock_map_get_next_key,
 	.map_lookup_elem_sys_only = sock_map_lookup_sys,
+	.map_update_elem	= sock_map_update_elem,
 	.map_delete_elem	= sock_map_delete_elem,
 	.map_lookup_elem	= sock_map_lookup,
 	.map_release_uref	= sock_map_release_progs,
@@ -1180,6 +1203,7 @@ const struct bpf_map_ops sock_hash_ops = {
 	.map_alloc		= sock_hash_alloc,
 	.map_free		= sock_hash_free,
 	.map_get_next_key	= sock_hash_get_next_key,
+	.map_update_elem	= sock_map_update_elem,
 	.map_delete_elem	= sock_hash_delete_elem,
 	.map_lookup_elem	= sock_hash_lookup,
 	.map_lookup_elem_sys_only = sock_hash_lookup_sys,