mptcp: reset subflow when MP_FAIL doesn't respond
This patch adds a new msk->flags bit MPTCP_FAIL_NO_RESPONSE, then reuses
sk_timer to trigger a check if we have not received a response from the
peer after sending MP_FAIL. If the peer doesn't respond properly, reset
the subflow.

Signed-off-by: Geliang Tang <geliang.tang@suse.com>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 49fa1919d6
parent 9c81be0dbc
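As a rough illustration of the flow described in the commit message (arm a timeout after sending MP_FAIL, flag the connection if no response arrives, and let the worker reset the subflow), here is a minimal userspace C sketch. It is an illustrative stand-in, not kernel code: fake_subflow, fail_timer_expired and fail_worker are made-up names, and the real implementation (sk_timer, MPTCP_FAIL_NO_RESPONSE, mptcp_worker) is in the diff below.

/* Userspace sketch only (assumed names, not kernel APIs): the timer
 * callback just records "no response", and a separate worker step acts
 * on that flag, mirroring how the patch defers the actual subflow reset.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_subflow {
        bool mp_fail_response_expect;   /* waiting for the peer's MP_FAIL reply */
        bool fail_no_response;          /* stand-in for MPTCP_FAIL_NO_RESPONSE */
};

/* Timer expiry: if the peer never answered our MP_FAIL, mark the subflow. */
static void fail_timer_expired(struct fake_subflow *sf)
{
        if (sf->mp_fail_response_expect)
                sf->fail_no_response = true;
}

/* Worker context: perform the "reset" outside the timer handler. */
static void fail_worker(struct fake_subflow *sf)
{
        if (sf->fail_no_response)
                printf("MP_FAIL doesn't respond, reset the subflow\n");
}

int main(void)
{
        struct fake_subflow sf = { .mp_fail_response_expect = true };

        fail_timer_expired(&sf);        /* pretend the timeout has elapsed */
        fail_worker(&sf);
        return 0;
}

The split between a timer callback that only sets a flag and a worker that does the reset mirrors the patch's design: the timer runs in softirq context, so it only sets MPTCP_FAIL_NO_RESPONSE and schedules the work queue, while mptcp_worker() later takes the subflow socket lock and calls mptcp_subflow_reset().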
net/mptcp/pm.c
@@ -287,6 +287,7 @@ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
 {
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+       struct sock *s = (struct sock *)msk;
 
        pr_debug("fail_seq=%llu", fail_seq);
 
@@ -299,6 +300,13 @@ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
                subflow->send_mp_fail = 1;
                MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILTX);
                subflow->send_infinite_map = 1;
+       } else if (s && inet_sk_state_load(s) != TCP_CLOSE) {
+               pr_debug("MP_FAIL response received");
+
+               mptcp_data_lock(s);
+               if (inet_sk_state_load(s) != TCP_CLOSE)
+                       sk_stop_timer(s, &s->sk_timer);
+               mptcp_data_unlock(s);
        }
 }
 
net/mptcp/protocol.c
@@ -2169,10 +2169,38 @@ static void mptcp_retransmit_timer(struct timer_list *t)
        sock_put(sk);
 }
 
+static struct mptcp_subflow_context *
+mp_fail_response_expect_subflow(struct mptcp_sock *msk)
+{
+       struct mptcp_subflow_context *subflow, *ret = NULL;
+
+       mptcp_for_each_subflow(msk, subflow) {
+               if (READ_ONCE(subflow->mp_fail_response_expect)) {
+                       ret = subflow;
+                       break;
+               }
+       }
+
+       return ret;
+}
+
+static void mptcp_check_mp_fail_response(struct mptcp_sock *msk)
+{
+       struct mptcp_subflow_context *subflow;
+       struct sock *sk = (struct sock *)msk;
+
+       bh_lock_sock(sk);
+       subflow = mp_fail_response_expect_subflow(msk);
+       if (subflow)
+               __set_bit(MPTCP_FAIL_NO_RESPONSE, &msk->flags);
+       bh_unlock_sock(sk);
+}
+
 static void mptcp_timeout_timer(struct timer_list *t)
 {
        struct sock *sk = from_timer(sk, t, sk_timer);
 
+       mptcp_check_mp_fail_response(mptcp_sk(sk));
        mptcp_schedule_work(sk);
        sock_put(sk);
 }
@@ -2499,6 +2527,23 @@ reset_timer:
        mptcp_data_unlock(sk);
 }
 
+static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
+{
+       struct mptcp_subflow_context *subflow;
+       struct sock *ssk;
+       bool slow;
+
+       subflow = mp_fail_response_expect_subflow(msk);
+       if (subflow) {
+               pr_debug("MP_FAIL doesn't respond, reset the subflow");
+
+               ssk = mptcp_subflow_tcp_sock(subflow);
+               slow = lock_sock_fast(ssk);
+               mptcp_subflow_reset(ssk);
+               unlock_sock_fast(ssk, slow);
+       }
+}
+
 static void mptcp_worker(struct work_struct *work)
 {
        struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
@@ -2539,6 +2584,9 @@ static void mptcp_worker(struct work_struct *work)
        if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
                __mptcp_retrans(sk);
 
+       if (test_and_clear_bit(MPTCP_FAIL_NO_RESPONSE, &msk->flags))
+               mptcp_mp_fail_no_response(msk);
+
 unlock:
        release_sock(sk);
        sock_put(sk);
net/mptcp/protocol.h
@@ -116,6 +116,7 @@
 #define MPTCP_WORK_EOF 3
 #define MPTCP_FALLBACK_DONE 4
 #define MPTCP_WORK_CLOSE_SUBFLOW 5
+#define MPTCP_FAIL_NO_RESPONSE 6
 
 /* MPTCP socket release cb flags */
 #define MPTCP_PUSH_PENDING 1
net/mptcp/subflow.c
@@ -968,6 +968,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
 {
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        bool csum_reqd = READ_ONCE(msk->csum_enabled);
+       struct sock *sk = (struct sock *)msk;
        struct mptcp_ext *mpext;
        struct sk_buff *skb;
        u16 data_len;
@@ -1009,6 +1010,12 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
                pr_debug("infinite mapping received");
                MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
                subflow->map_data_len = 0;
+               if (sk && inet_sk_state_load(sk) != TCP_CLOSE) {
+                       mptcp_data_lock(sk);
+                       if (inet_sk_state_load(sk) != TCP_CLOSE)
+                               sk_stop_timer(sk, &sk->sk_timer);
+                       mptcp_data_unlock(sk);
+               }
                return MAPPING_INVALID;
        }
 
@@ -1219,6 +1226,10 @@ fallback:
                        sk_eat_skb(ssk, skb);
                } else {
                        WRITE_ONCE(subflow->mp_fail_response_expect, true);
+                       /* The data lock is acquired in __mptcp_move_skbs() */
+                       sk_reset_timer((struct sock *)msk,
+                                      &((struct sock *)msk)->sk_timer,
+                                      jiffies + TCP_RTO_MAX);
                }
                WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
                return true;