forked from Minki/linux
bpf, sockmap: Fix memleak on ingress msg enqueue
If backlog handler is running during a tear down operation we may enqueue
data on the ingress msg queue while tear down is trying to free it.
sk_psock_backlog()
sk_psock_handle_skb()
skb_psock_skb_ingress()
sk_psock_skb_ingress_enqueue()
sk_psock_queue_msg(psock,msg)
spin_lock(ingress_lock)
sk_psock_zap_ingress()
__sk_psock_purge_ingress_msg()
-- free ingress_msg list --
spin_unlock(ingress_lock)
spin_lock(ingress_lock)
list_add_tail(msg,ingress_msg) <- entry on list with no one
left to free it.
spin_unlock(ingress_lock)
To fix we only enqueue from backlog if the ENABLED bit is set. The tear
down logic clears the bit with ingress_lock set so we won't enqueue the
msg in the last step.
Fixes: 799aa7f98d53 ("skmsg: Avoid lock_sock() in sk_psock_backlog()")
Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Jakub Sitnicki <jakub@cloudflare.com>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Link: https://lore.kernel.org/bpf/20210727160500.1713554-4-john.fastabend@gmail.com
This commit is contained in:
parent
476d98018f
commit
9635720b7c
@ -285,11 +285,45 @@ static inline struct sk_psock *sk_psock(const struct sock *sk)
|
||||
return rcu_dereference_sk_user_data(sk);
|
||||
}
|
||||
|
||||
static inline void sk_psock_set_state(struct sk_psock *psock,
|
||||
enum sk_psock_state_bits bit)
|
||||
{
|
||||
set_bit(bit, &psock->state);
|
||||
}
|
||||
|
||||
static inline void sk_psock_clear_state(struct sk_psock *psock,
|
||||
enum sk_psock_state_bits bit)
|
||||
{
|
||||
clear_bit(bit, &psock->state);
|
||||
}
|
||||
|
||||
static inline bool sk_psock_test_state(const struct sk_psock *psock,
|
||||
enum sk_psock_state_bits bit)
|
||||
{
|
||||
return test_bit(bit, &psock->state);
|
||||
}
|
||||
|
||||
/* Account a dropped skb against @sk's drop counter, then free the skb. */
static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_add(sk, skb);
	kfree_skb(skb);
}
|
||||
|
||||
static inline void drop_sk_msg(struct sk_psock *psock, struct sk_msg *msg)
|
||||
{
|
||||
if (msg->skb)
|
||||
sock_drop(psock->sk, msg->skb);
|
||||
kfree(msg);
|
||||
}
|
||||
|
||||
static inline void sk_psock_queue_msg(struct sk_psock *psock,
|
||||
struct sk_msg *msg)
|
||||
{
|
||||
spin_lock_bh(&psock->ingress_lock);
|
||||
list_add_tail(&msg->list, &psock->ingress_msg);
|
||||
if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
|
||||
list_add_tail(&msg->list, &psock->ingress_msg);
|
||||
else
|
||||
drop_sk_msg(psock, msg);
|
||||
spin_unlock_bh(&psock->ingress_lock);
|
||||
}
|
||||
|
||||
@ -406,24 +440,6 @@ static inline void sk_psock_restore_proto(struct sock *sk,
|
||||
psock->psock_update_sk_prot(sk, psock, true);
|
||||
}
|
||||
|
||||
static inline void sk_psock_set_state(struct sk_psock *psock,
|
||||
enum sk_psock_state_bits bit)
|
||||
{
|
||||
set_bit(bit, &psock->state);
|
||||
}
|
||||
|
||||
static inline void sk_psock_clear_state(struct sk_psock *psock,
|
||||
enum sk_psock_state_bits bit)
|
||||
{
|
||||
clear_bit(bit, &psock->state);
|
||||
}
|
||||
|
||||
static inline bool sk_psock_test_state(const struct sk_psock *psock,
|
||||
enum sk_psock_state_bits bit)
|
||||
{
|
||||
return test_bit(bit, &psock->state);
|
||||
}
|
||||
|
||||
static inline struct sk_psock *sk_psock_get(struct sock *sk)
|
||||
{
|
||||
struct sk_psock *psock;
|
||||
|
@ -584,12 +584,6 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
|
||||
return sk_psock_skb_ingress(psock, skb);
|
||||
}
|
||||
|
||||
/* Charge a dropped skb to @sk's drop statistics and free it. */
static void sock_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_add(sk, skb);
	kfree_skb(skb);
}
|
||||
|
||||
static void sk_psock_skb_state(struct sk_psock *psock,
|
||||
struct sk_psock_work_state *state,
|
||||
struct sk_buff *skb,
|
||||
|
Loading…
Reference in New Issue
Block a user