Mirror of https://github.com/torvalds/linux.git
Merge branch 'mptcp-locking-fixes'
Mat Martineau says:

====================
mptcp: Locking fixes

Two separate locking fixes for the networking tree:

Patch 1 addresses an MPTCP fastopen error-path deadlock that was found
with syzkaller.

Patch 2 works around a lockdep false positive between MPTCP listening
and non-listening sockets at socket destruct time.
====================

Link: https://lore.kernel.org/r/20221220195215.238353-1-mathew.j.martineau@linux.intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 43ae218f69
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1662,6 +1662,8 @@ static void mptcp_set_nospace(struct sock *sk)
 	set_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags);
 }
 
+static int mptcp_disconnect(struct sock *sk, int flags);
+
 static int mptcp_sendmsg_fastopen(struct sock *sk, struct sock *ssk, struct msghdr *msg,
 				  size_t len, int *copied_syn)
 {
@@ -1672,9 +1674,9 @@ static int mptcp_sendmsg_fastopen(struct sock *sk, struct sock *ssk, struct msghdr *msg,
 	lock_sock(ssk);
 	msg->msg_flags |= MSG_DONTWAIT;
 	msk->connect_flags = O_NONBLOCK;
-	msk->is_sendmsg = 1;
+	msk->fastopening = 1;
 	ret = tcp_sendmsg_fastopen(ssk, msg, copied_syn, len, NULL);
-	msk->is_sendmsg = 0;
+	msk->fastopening = 0;
 	msg->msg_flags = saved_flags;
 	release_sock(ssk);
 
@@ -1688,6 +1690,8 @@ static int mptcp_sendmsg_fastopen(struct sock *sk, struct sock *ssk, struct msghdr *msg,
 		 */
 		if (ret && ret != -EINPROGRESS && ret != -ERESTARTSYS && ret != -EINTR)
 			*copied_syn = 0;
+	} else if (ret && ret != -EINPROGRESS) {
+		mptcp_disconnect(sk, 0);
 	}
 
 	return ret;
@@ -2353,7 +2357,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
 	/* otherwise tcp will dispose of the ssk and subflow ctx */
 	if (ssk->sk_state == TCP_LISTEN) {
 		tcp_set_state(ssk, TCP_CLOSE);
-		mptcp_subflow_queue_clean(ssk);
+		mptcp_subflow_queue_clean(sk, ssk);
 		inet_csk_listen_stop(ssk);
 		mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED);
 	}
@@ -2989,6 +2993,14 @@ static int mptcp_disconnect(struct sock *sk, int flags)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
 
+	/* We are on the fastopen error path. We can't call straight into the
+	 * subflows cleanup code due to lock nesting (we are already under
+	 * msk->firstsocket lock). Do nothing and leave the cleanup to the
+	 * caller.
+	 */
+	if (msk->fastopening)
+		return 0;
+
 	inet_sk_state_store(sk, TCP_CLOSE);
 
 	mptcp_stop_timer(sk);
@@ -3532,7 +3544,7 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	/* if reaching here via the fastopen/sendmsg path, the caller already
 	 * acquired the subflow socket lock, too.
 	 */
-	if (msk->is_sendmsg)
+	if (msk->fastopening)
 		err = __inet_stream_connect(ssock, uaddr, addr_len, msk->connect_flags, 1);
 	else
 		err = inet_stream_connect(ssock, uaddr, addr_len, msk->connect_flags);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -295,7 +295,7 @@ struct mptcp_sock {
 	u8		recvmsg_inq:1,
 			cork:1,
 			nodelay:1,
-			is_sendmsg:1;
+			fastopening:1;
 	int		connect_flags;
 	struct work_struct work;
 	struct sk_buff	*ooo_last_skb;
@@ -628,7 +628,7 @@ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
 		     struct mptcp_subflow_context *subflow);
 void __mptcp_subflow_send_ack(struct sock *ssk);
 void mptcp_subflow_reset(struct sock *ssk);
-void mptcp_subflow_queue_clean(struct sock *ssk);
+void mptcp_subflow_queue_clean(struct sock *sk, struct sock *ssk);
 void mptcp_sock_graft(struct sock *sk, struct socket *parent);
 struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
 bool __mptcp_close(struct sock *sk, long timeout);
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1791,7 +1791,7 @@ static void subflow_state_change(struct sock *sk)
 	}
 }
 
-void mptcp_subflow_queue_clean(struct sock *listener_ssk)
+void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
 {
 	struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
 	struct mptcp_sock *msk, *next, *head = NULL;
@@ -1840,8 +1840,23 @@ void mptcp_subflow_queue_clean(struct sock *listener_ssk)
 
 		do_cancel_work = __mptcp_close(sk, 0);
 		release_sock(sk);
-		if (do_cancel_work)
+		if (do_cancel_work) {
+			/* lockdep will report a false positive ABBA deadlock
+			 * between cancel_work_sync and the listener socket.
+			 * The involved locks belong to different sockets WRT
+			 * the existing AB chain.
+			 * Using a per socket key is problematic as key
+			 * deregistration requires process context and must be
+			 * performed at socket disposal time, in atomic
+			 * context.
+			 * Just tell lockdep to consider the listener socket
+			 * released here.
+			 */
+			mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
 			mptcp_cancel_work(sk);
+			mutex_acquire(&listener_sk->sk_lock.dep_map,
+				      SINGLE_DEPTH_NESTING, 0, _RET_IP_);
+		}
 		sock_put(sk);
 	}
 
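Background sketch for patch 1: the deadlock arises when generic disconnect code is invoked while the caller already holds the non-recursive socket lock that code would need, so the fix guards the path with msk->fastopening, lets mptcp_disconnect() return early, and leaves the cleanup to the caller once the lock is dropped. The userspace program below is a minimal sketch of that guard-flag pattern using POSIX threads; every name in it (struct conn, fastopening, conn_disconnect, conn_sendmsg_fastopen) is hypothetical and only mirrors the shape of the fix, not the kernel code. It builds with cc -pthread.

/* Guard-flag sketch: while the fastopen path holds a non-recursive lock,
 * the generic disconnect helper must not try to take that lock again;
 * the caller redoes the disconnect after dropping the lock.
 * All names are hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct conn {
	pthread_mutex_t lock;	/* stands in for the subflow socket lock */
	bool fastopening;	/* set while the fastopen path holds ->lock */
	bool established;
};

static int conn_disconnect(struct conn *c)
{
	/* Fastopen error path: the caller already holds ->lock, so taking
	 * it here would deadlock. Do nothing and let the caller clean up.
	 */
	if (c->fastopening)
		return 0;

	pthread_mutex_lock(&c->lock);
	c->established = false;
	pthread_mutex_unlock(&c->lock);
	return 0;
}

static int conn_sendmsg_fastopen(struct conn *c, bool simulate_error)
{
	int ret = 0;

	pthread_mutex_lock(&c->lock);
	c->fastopening = true;

	if (simulate_error) {
		ret = -1;
		/* Code running under ->lock may end up in the generic
		 * helper; the guard flag turns it into a no-op here.
		 */
		conn_disconnect(c);
	}

	c->fastopening = false;
	pthread_mutex_unlock(&c->lock);

	/* With the lock dropped, the caller performs the real cleanup. */
	if (ret)
		conn_disconnect(c);

	return ret;
}

int main(void)
{
	struct conn c = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.established = true,
	};

	printf("ret=%d established=%d\n",
	       conn_sendmsg_fastopen(&c, true), c.established);
	return 0;
}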
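Background sketch for patch 2: lockdep tracks dependencies per lock class, so holding a listener's sk_lock while cancel_work_sync() waits for a work item that takes another socket's sk_lock of the same class looks like an ABBA cycle, even though the two lock instances belong to different sockets and cannot actually deadlock; the patch therefore tells lockdep the listener lock is released around mptcp_cancel_work(). The toy pthread program below only mimics the shape of that chain with two separate mutex instances standing in for the shared lock class; all names are hypothetical and it is an analogy, not kernel code. It builds with cc -pthread and runs to completion.

/* Toy model of the chain behind the lockdep false positive: a listener
 * socket's lock is held while synchronously waiting for a work item that
 * takes a different socket's lock of the same class. Per lock instance
 * this is deadlock-free; lockdep reasons per class, so in the kernel it
 * flags an apparent ABBA. All names are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t listener_lock = PTHREAD_MUTEX_INITIALIZER; /* sk_lock, listener */
static pthread_mutex_t child_lock = PTHREAD_MUTEX_INITIALIZER;    /* sk_lock, child msk */

/* Stands in for the msk worker: runs with the child socket lock held. */
static void *child_work(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&child_lock);
	puts("work: holding child socket lock");
	pthread_mutex_unlock(&child_lock);
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, child_work, NULL);

	/* Listener teardown: hold the listener lock while synchronously
	 * waiting for the child's work to finish (the join below is the
	 * cancel_work_sync() analogue).
	 */
	pthread_mutex_lock(&listener_lock);
	pthread_join(worker, NULL);
	pthread_mutex_unlock(&listener_lock);

	puts("done: no deadlock, the two locks are different instances");
	return 0;
}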