mptcp: less aggressive retransmission strategy
The current MPTCP re-inject strategy is very aggressive: we perform MPTCP-level retransmissions even on single-subflow connections if the link in use is lossy.

Let's be a little more conservative: retransmit only if at least one subflow has both its write and rtx queues empty. Additionally, use the backup subflows only if the active subflows are stale - that is, they have made no progress for at least an rtx period - and ignore stale subflows when updating the rtx timeout.

Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/207
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 33d41c9cd7
commit 71b7dec27f
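The heart of the change is a per-subflow staleness counter. The following stand-alone, user-space sketch models the counting rule that the patch implements in mptcp_pm_subflow_chk_stale() below; struct subflow_model, subflow_chk_stale() and the sample timestamps are illustrative only, while the real code operates on struct mptcp_subflow_context and the subflow's TCP rcv_tstamp.

/* Stand-alone model of the staleness tracking added by this patch
 * (user-space sketch; the real code lives in mptcp_pm_subflow_chk_stale()
 * and operates on struct mptcp_subflow_context).
 */
#include <stdint.h>
#include <stdio.h>

struct subflow_model {
	uint8_t  stale_count;      /* consecutive rtx periods with no progress */
	uint32_t stale_rcv_tstamp; /* rcv_tstamp snapshot at the start of a stale run */
};

/* Called once per MPTCP-level retransmit attempt with the subflow's
 * current receive timestamp.
 */
static void subflow_chk_stale(struct subflow_model *sf, uint32_t rcv_tstamp)
{
	if (!sf->stale_count) {
		/* start tracking a possible stale run */
		sf->stale_rcv_tstamp = rcv_tstamp;
		sf->stale_count++;
	} else if (sf->stale_rcv_tstamp == rcv_tstamp) {
		/* no progress since the last check: keep counting, saturating */
		if (sf->stale_count < UINT8_MAX)
			sf->stale_count++;
	} else {
		/* the subflow made progress: reset */
		sf->stale_count = 0;
	}
}

int main(void)
{
	struct subflow_model sf = { 0 };

	subflow_chk_stale(&sf, 100); /* first check: count = 1 */
	subflow_chk_stale(&sf, 100); /* no progress: count = 2 */
	printf("stale_count after two idle periods: %u\n", (unsigned)sf.stale_count);

	subflow_chk_stale(&sf, 140); /* progress: count resets to 0 */
	printf("stale_count after progress: %u\n", (unsigned)sf.stale_count);
	return 0;
}

A subflow whose counter climbs past 1 has seen at least one full rtx period without any incoming data, which is exactly the condition the retransmit path below checks before falling back to a backup subflow.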
@@ -308,6 +308,23 @@ int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
 	return mptcp_pm_nl_get_local_id(msk, skc);
 }
 
+void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
+{
+	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+	u32 rcv_tstamp = READ_ONCE(tcp_sk(ssk)->rcv_tstamp);
+
+	/* keep track of rtx periods with no progress */
+	if (!subflow->stale_count) {
+		subflow->stale_rcv_tstamp = rcv_tstamp;
+		subflow->stale_count++;
+	} else if (subflow->stale_rcv_tstamp == rcv_tstamp) {
+		if (subflow->stale_count < U8_MAX)
+			subflow->stale_count++;
+	} else {
+		subflow->stale_count = 0;
+	}
+}
+
 void mptcp_pm_data_init(struct mptcp_sock *msk)
 {
 	msk->pm.add_addr_signaled = 0;
@@ -420,7 +420,8 @@ static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context *subflow)
 {
 	const struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
 
-	return inet_csk(ssk)->icsk_pending ? inet_csk(ssk)->icsk_timeout - jiffies : 0;
+	return inet_csk(ssk)->icsk_pending && !subflow->stale_count ?
+	       inet_csk(ssk)->icsk_timeout - jiffies : 0;
 }
 
 static void mptcp_set_timeout(struct sock *sk)
@@ -2100,8 +2101,9 @@ static void mptcp_timeout_timer(struct timer_list *t)
  */
 static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
 {
+	struct sock *backup = NULL, *pick = NULL;
 	struct mptcp_subflow_context *subflow;
-	struct sock *backup = NULL;
+	int min_stale_count = INT_MAX;
 
 	sock_owned_by_me((const struct sock *)msk);
 
@@ -2114,11 +2116,11 @@ static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
 		if (!mptcp_subflow_active(subflow))
 			continue;
 
-		/* still data outstanding at TCP level? Don't retransmit. */
-		if (!tcp_write_queue_empty(ssk)) {
-			if (inet_csk(ssk)->icsk_ca_state >= TCP_CA_Loss)
-				continue;
-			return NULL;
+		/* still data outstanding at TCP level? skip this */
+		if (!tcp_rtx_and_write_queues_empty(ssk)) {
+			mptcp_pm_subflow_chk_stale(msk, ssk);
+			min_stale_count = min_t(int, min_stale_count, subflow->stale_count);
+			continue;
 		}
 
 		if (subflow->backup) {
@@ -2127,10 +2129,15 @@ static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
 			continue;
 		}
 
-		return ssk;
+		if (!pick)
+			pick = ssk;
 	}
 
-	return backup;
+	if (pick)
+		return pick;
+
+	/* use backup only if there are no progresses anywhere */
+	return min_stale_count > 1 ? backup : NULL;
 }
 
 static void mptcp_dispose_initial_subflow(struct mptcp_sock *msk)
@@ -439,11 +439,13 @@ struct mptcp_subflow_context {
 	u8	reset_seen:1;
 	u8	reset_transient:1;
 	u8	reset_reason:4;
+	u8	stale_count;
 
 	long	delegated_status;
 	struct	list_head delegated_node;   /* link into delegated_action, protected by local BH */
 
-	u32 setsockopt_seq;
+	u32	setsockopt_seq;
+	u32	stale_rcv_tstamp;
 
 	struct	sock *tcp_sock;	    /* tcp sk backpointer */
 	struct	sock *conn;	    /* parent mptcp_sock */
@@ -690,6 +692,7 @@ void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac);
 
 void __init mptcp_pm_init(void);
 void mptcp_pm_data_init(struct mptcp_sock *msk);
+void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk);
 void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side);
 void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk, gfp_t gfp);
 bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk);
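To tie the hunks above together, here is a compressed, user-space rendering of the selection policy that mptcp_subflow_get_retrans() implements after this patch: prefer the first active, non-backup subflow with empty TCP write and rtx queues, remember the first eligible backup, and return the backup only when every blocked subflow has been stale for more than one rtx period. struct subflow_view and pick_rtx_subflow() are made-up names for this example; only the decision logic mirrors the kernel code.

/* Illustrative user-space model of the retransmit-subflow selection added by
 * this patch; the struct and helper names are invented for the example.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct subflow_view {
	const char *name;
	bool active;       /* models mptcp_subflow_active() */
	bool backup;       /* models subflow->backup */
	bool queues_empty; /* models tcp_rtx_and_write_queues_empty() */
	int  stale_count;  /* consecutive rtx periods with no progress */
};

static const char *pick_rtx_subflow(const struct subflow_view *sf, int n)
{
	const char *backup = NULL, *pick = NULL;
	int min_stale_count = INT_MAX;

	for (int i = 0; i < n; i++) {
		if (!sf[i].active)
			continue;

		/* data still outstanding at TCP level? record staleness, skip */
		if (!sf[i].queues_empty) {
			if (sf[i].stale_count < min_stale_count)
				min_stale_count = sf[i].stale_count;
			continue;
		}

		if (sf[i].backup) {
			if (!backup)
				backup = sf[i].name;
			continue;
		}

		if (!pick)
			pick = sf[i].name;
	}

	if (pick)
		return pick;

	/* use the backup only if no blocked subflow is making progress */
	return min_stale_count > 1 ? backup : NULL;
}

int main(void)
{
	const struct subflow_view sf[] = {
		{ "wifi",     true, false, false, 3 }, /* lossy: queues busy, stale */
		{ "cellular", true, true,  true,  0 }, /* idle backup path */
	};

	const char *ssk = pick_rtx_subflow(sf, 2);
	printf("retransmit on: %s\n", ssk ? ssk : "(none)");
	return 0;
}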