bpf, sockmap: Allow inserting listening TCP sockets into sockmap
In order for sockmap/sockhash types to become generic collections for
storing TCP sockets we need to loosen the checks during map update,
while tightening the checks in redirect helpers.

Currently sock{map,hash} require the TCP socket to be in established
state, which prevents inserting listening sockets.

Change the update pre-checks so the socket can also be in listening
state. Since it doesn't make sense to redirect with sock{map,hash} to
listening sockets, add appropriate socket state checks to BPF redirect
helpers too.

Signed-off-by: Jakub Sitnicki <jakub@cloudflare.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/20200218171023.844439-5-jakub@cloudflare.com
commit 8ca30379a4 (parent e80251555f)
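For context, a minimal user-space sketch of what the relaxed update check permits: inserting a TCP socket that is listening, rather than established, into a sockmap with a plain bpf_map_update_elem() call. This is an illustration built on libbpf, not part of the patch; the map is assumed to exist already with 4-byte keys and 4-byte (socket fd) values, and error handling is omitted.

/* Sketch: insert a listening TCP socket into an existing sockmap.
 * Assumes libbpf; map_fd refers to a BPF_MAP_TYPE_SOCKMAP created
 * elsewhere with 4-byte keys and 4-byte values (illustrative setup).
 */
#include <bpf/bpf.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

static int insert_listener(int map_fd)
{
        struct sockaddr_in addr = {
                .sin_family = AF_INET,
                .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
        };
        int s = socket(AF_INET, SOCK_STREAM, 0);
        __u32 key = 0;

        bind(s, (struct sockaddr *)&addr, sizeof(addr));
        listen(s, SOMAXCONN);

        /* Before this patch the kernel rejected this update with
         * -EOPNOTSUPP, because the socket is in TCP_LISTEN rather
         * than TCP_ESTABLISHED.
         */
        return bpf_map_update_elem(map_fd, &key, &s, BPF_NOEXIST);
}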
net/core/sock_map.c

@@ -391,7 +391,8 @@ out_free:
 static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
 {
         return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
-               ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB;
+               ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
+               ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
 }

 static bool sock_map_sk_is_suitable(const struct sock *sk)
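Accepting BPF_SOCK_OPS_TCP_LISTEN_CB in sock_map_op_okay() means a sockops program may now insert listeners from the BPF side through the existing bpf_sock_map_update() helper. A minimal sketch, assuming libbpf's bpf_helpers.h; the map name, key, and section layout are illustrative, not taken from this patch:

/* Sketch: sockops program that adds a socket to a sockmap when it
 * enters the listen state. Map and program names are illustrative.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_SOCKMAP);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, __u64);
} listen_map SEC(".maps");

SEC("sockops")
int add_listener(struct bpf_sock_ops *skops)
{
        __u32 key = 0;

        /* BPF_SOCK_OPS_TCP_LISTEN_CB fires on listen(2); calling the
         * update helper from this callback is what the hunk above
         * newly allows.
         */
        if (skops->op == BPF_SOCK_OPS_TCP_LISTEN_CB)
                bpf_sock_map_update(skops, &listen_map, &key, BPF_NOEXIST);
        return 1;
}

char _license[] SEC("license") = "GPL";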
@@ -400,6 +401,16 @@ static bool sock_map_sk_is_suitable(const struct sock *sk)
                sk->sk_protocol == IPPROTO_TCP;
 }

+static bool sock_map_sk_state_allowed(const struct sock *sk)
+{
+        return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
+}
+
+static bool sock_map_redirect_allowed(const struct sock *sk)
+{
+        return sk->sk_state != TCP_LISTEN;
+}
+
 static int sock_map_update_elem(struct bpf_map *map, void *key,
                                 void *value, u64 flags)
 {
@@ -423,7 +434,7 @@ static int sock_map_update_elem(struct bpf_map *map, void *key,
         }

         sock_map_sk_acquire(sk);
-        if (sk->sk_state != TCP_ESTABLISHED)
+        if (!sock_map_sk_state_allowed(sk))
                 ret = -EOPNOTSUPP;
         else
                 ret = sock_map_update_common(map, idx, sk, flags);
@@ -460,13 +471,17 @@ BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
            struct bpf_map *, map, u32, key, u64, flags)
 {
         struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
+        struct sock *sk;

         if (unlikely(flags & ~(BPF_F_INGRESS)))
                 return SK_DROP;
-        tcb->bpf.flags = flags;
-        tcb->bpf.sk_redir = __sock_map_lookup_elem(map, key);
-        if (!tcb->bpf.sk_redir)
+
+        sk = __sock_map_lookup_elem(map, key);
+        if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
                 return SK_DROP;
+
+        tcb->bpf.flags = flags;
+        tcb->bpf.sk_redir = sk;
         return SK_PASS;
 }

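On the redirect side the helper interface is unchanged; a typical sk_skb verdict program still looks like the sketch below (map, key, and section name are illustrative, not from this patch). What the patch adds is a state check inside the helper: a key that resolves to a listening socket, which can now be present in the map, makes bpf_sk_redirect_map() return SK_DROP.

/* Sketch: sk_skb verdict program redirecting through a sockmap. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_SOCKMAP);
        __uint(max_entries, 2);
        __type(key, __u32);
        __type(value, __u64);
} redir_map SEC(".maps");

SEC("sk_skb/stream_verdict")
int prog_stream_verdict(struct __sk_buff *skb)
{
        __u32 key = 0;

        /* SK_PASS on success; SK_DROP if the slot is empty or, after
         * this patch, if the socket stored there is in TCP_LISTEN.
         */
        return bpf_sk_redirect_map(skb, &redir_map, key, 0);
}

char _license[] SEC("license") = "GPL";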
@@ -483,12 +498,17 @@ const struct bpf_func_proto bpf_sk_redirect_map_proto = {
 BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
            struct bpf_map *, map, u32, key, u64, flags)
 {
+        struct sock *sk;
+
         if (unlikely(flags & ~(BPF_F_INGRESS)))
                 return SK_DROP;
-        msg->flags = flags;
-        msg->sk_redir = __sock_map_lookup_elem(map, key);
-        if (!msg->sk_redir)
+
+        sk = __sock_map_lookup_elem(map, key);
+        if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
                 return SK_DROP;
+
+        msg->flags = flags;
+        msg->sk_redir = sk;
         return SK_PASS;
 }

@@ -748,7 +768,7 @@ static int sock_hash_update_elem(struct bpf_map *map, void *key,
         }

         sock_map_sk_acquire(sk);
-        if (sk->sk_state != TCP_ESTABLISHED)
+        if (!sock_map_sk_state_allowed(sk))
                 ret = -EOPNOTSUPP;
         else
                 ret = sock_hash_update_common(map, key, sk, flags);
@@ -916,13 +936,17 @@ BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
            struct bpf_map *, map, void *, key, u64, flags)
 {
         struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
+        struct sock *sk;

         if (unlikely(flags & ~(BPF_F_INGRESS)))
                 return SK_DROP;
-        tcb->bpf.flags = flags;
-        tcb->bpf.sk_redir = __sock_hash_lookup_elem(map, key);
-        if (!tcb->bpf.sk_redir)
+
+        sk = __sock_hash_lookup_elem(map, key);
+        if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
                 return SK_DROP;
+
+        tcb->bpf.flags = flags;
+        tcb->bpf.sk_redir = sk;
         return SK_PASS;
 }

@@ -939,12 +963,17 @@ const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
 BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
            struct bpf_map *, map, void *, key, u64, flags)
 {
+        struct sock *sk;
+
         if (unlikely(flags & ~(BPF_F_INGRESS)))
                 return SK_DROP;
-        msg->flags = flags;
-        msg->sk_redir = __sock_hash_lookup_elem(map, key);
-        if (!msg->sk_redir)
+
+        sk = __sock_hash_lookup_elem(map, key);
+        if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
                 return SK_DROP;
+
+        msg->flags = flags;
+        msg->sk_redir = sk;
         return SK_PASS;
 }

tools/testing/selftests/bpf/test_maps.c

@@ -756,11 +756,7 @@ static void test_sockmap(unsigned int tasks, void *data)
         /* Test update without programs */
         for (i = 0; i < 6; i++) {
                 err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY);
-                if (i < 2 && !err) {
-                        printf("Allowed update sockmap '%i:%i' not in ESTABLISHED\n",
-                               i, sfd[i]);
-                        goto out_sockmap;
-                } else if (i >= 2 && err) {
+                if (err) {
                         printf("Failed noprog update sockmap '%i:%i'\n",
                                i, sfd[i]);
                         goto out_sockmap;