1c2709cfff
We discovered from packet traces of slow loss recovery on kernels with
the default HZ=250 setting (and min_rtt < 1ms) that after reordering,
when receiving a SACKed sequence range, the RACK reordering timer was
firing after about 16ms rather than the desired value of roughly
min_rtt/4 + 2ms. The problem is largely due to the RACK reorder timer
calculation adding in TCP_TIMEOUT_MIN, which is 2 jiffies. On kernels
with HZ=250, this is 2*4ms = 8ms. The TLP timer calculation has the
exact same issue.
This commit fixes the TLP transmit timer and RACK reordering timer
floor calculation to more closely match the intended 2ms floor even on
kernels with HZ=250. It does this by adding in a new
TCP_TIMEOUT_MIN_US floor of 2000 us and then converting to jiffies,
instead of the current approach of converting to jiffies and then
adding the TCP_TIMEOUT_MIN value of 2 jiffies.
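For illustration, a minimal userspace sketch of the two calculations (assuming HZ=250, an example timeout of 1250 us, and a rounded-up usecs_to_jiffies_approx() helper standing in for the kernel's usecs_to_jiffies(); these values are assumptions, not from the patch itself):

/* Userspace sketch (not kernel code) comparing the old and new timer
 * floor calculations described above.
 */
#include <stdio.h>

#define HZ                 250	/* assumed tick rate for this example */
#define TCP_TIMEOUT_MIN    2	/* jiffies; 2 * 4ms = 8ms at HZ=250 */
#define TCP_TIMEOUT_MIN_US 2000	/* the 2000 us floor added by this commit */

static unsigned long usecs_to_jiffies_approx(unsigned long us)
{
	return (us * HZ + 999999UL) / 1000000UL;	/* round up to the next tick */
}

int main(void)
{
	unsigned long timeout_us = 1250;	/* e.g. min_rtt/4 + reo_wnd at low RTT */

	/* Old: convert to jiffies first, then add the 2-jiffy floor */
	unsigned long old_j = usecs_to_jiffies_approx(timeout_us) + TCP_TIMEOUT_MIN;

	/* New: add the 2000 us floor first, then convert once */
	unsigned long new_j = usecs_to_jiffies_approx(timeout_us + TCP_TIMEOUT_MIN_US);

	printf("old: %lu jiffies (%lu ms)\n", old_j, old_j * 1000 / HZ);	/* 3 jiffies, 12 ms */
	printf("new: %lu jiffies (%lu ms)\n", new_j, new_j * 1000 / HZ);	/* 1 jiffy, 4 ms */
	return 0;
}

With these assumed values the old path yields a 12 ms timer and the new path a 4 ms timer; the exact firing times observed on real systems also depend on timer granularity and when the timer is armed.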
Our testing has verified that on kernels with HZ=1000, as expected,
this does not produce significant changes in behavior, but on kernels
with the default HZ=250 the latency improvement can be large. For
example, our tests show that for HZ=250 kernels at low RTTs this fix
roughly halves the latency for the RACK reorder timer: instead of
mostly firing at 16ms it mostly fires at 8ms.
Suggested-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Fixes: bb4d991a28 ("tcp: adjust tail loss probe timeout")
Reviewed-by: Eric Dumazet <edumazet@google.com>
Link: https://lore.kernel.org/r/20231015174700.2206872-1-ncardwell.sw@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
// SPDX-License-Identifier: GPL-2.0
#include <linux/tcp.h>
#include <net/tcp.h>

static u32 tcp_rack_reo_wnd(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->reord_seen) {
		/* If reordering has not been observed, be aggressive during
		 * the recovery or starting the recovery by DUPACK threshold.
		 */
		if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery)
			return 0;

		if (tp->sacked_out >= tp->reordering &&
		    !(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
		      TCP_RACK_NO_DUPTHRESH))
			return 0;
	}

	/* To be more reordering resilient, allow min_rtt/4 settling delay.
	 * Use min_rtt instead of the smoothed RTT because reordering is
	 * often a path property and less related to queuing or delayed ACKs.
	 * Upon receiving DSACKs, linearly increase the window up to the
	 * smoothed RTT.
	 */
	return min((tcp_min_rtt(tp) >> 2) * tp->rack.reo_wnd_steps,
		   tp->srtt_us >> 3);
}

s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
{
	return tp->rack.rtt_us + reo_wnd -
	       tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
}

/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost, if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is it applies to both original and retransmitted
 * packet and therefore is robust against tail losses. Another advantage
 * is being more resilient to reordering by simply allowing some
 * "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb, *n;
	u32 reo_wnd;

	*reo_timeout = 0;
	reo_wnd = tcp_rack_reo_wnd(sk);
	list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
				 tcp_tsorted_anchor) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		s32 remaining;

		/* Skip ones marked lost but not yet retransmitted */
		if ((scb->sacked & TCPCB_LOST) &&
		    !(scb->sacked & TCPCB_SACKED_RETRANS))
			continue;

		if (!tcp_skb_sent_after(tp->rack.mstamp,
					tcp_skb_timestamp_us(skb),
					tp->rack.end_seq, scb->end_seq))
			break;

		/* A packet is lost if it has not been s/acked beyond
		 * the recent RTT plus the reordering window.
		 */
		remaining = tcp_rack_skb_timeout(tp, skb, reo_wnd);
		if (remaining <= 0) {
			tcp_mark_skb_lost(sk, skb);
			list_del_init(&skb->tcp_tsorted_anchor);
		} else {
			/* Record maximum wait time */
			*reo_timeout = max_t(u32, *reo_timeout, remaining);
		}
	}
}

bool tcp_rack_mark_lost(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout;

	if (!tp->rack.advanced)
		return false;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, &timeout);
	if (timeout) {
		timeout = usecs_to_jiffies(timeout + TCP_TIMEOUT_MIN_US);
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
	return !!timeout;
}

/* Record the most recently (re)sent time among the (s)acked packets
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      u64 xmit_time)
{
	u32 rtt_us;

	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
	if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or the prior
		 * retransmission) was sacked.
		 *
		 * If the original is lost, there is no ambiguity. Otherwise
		 * we assume the original can be delayed up to aRTT + min_rtt.
		 * the aRTT term is bounded by the fast recovery or timeout,
		 * so it's at least one RTT (i.e., retransmission is at least
		 * an RTT later).
		 */
		return;
	}
	tp->rack.advanced = 1;
	tp->rack.rtt_us = rtt_us;
	if (tcp_skb_sent_after(xmit_time, tp->rack.mstamp,
			       end_seq, tp->rack.end_seq)) {
		tp->rack.mstamp = xmit_time;
		tp->rack.end_seq = end_seq;
	}
}

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout, prior_inflight;
	u32 lost = tp->lost;

	prior_inflight = tcp_packets_in_flight(tp);
	tcp_rack_detect_loss(sk, &timeout);
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, tp->lost - lost, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
}

/* Updates the RACK's reo_wnd based on DSACK and no. of recoveries.
 *
 * If a DSACK is received that seems like it may have been due to reordering
 * triggering fast recovery, increment reo_wnd by min_rtt/4 (upper bounded
 * by srtt), since there is possibility that spurious retransmission was
 * due to reordering delay longer than reo_wnd.
 *
 * Persist the current reo_wnd value for TCP_RACK_RECOVERY_THRESH (16)
 * no. of successful recoveries (accounts for full DSACK-based loss
 * recovery undo). After that, reset it to default (min_rtt/4).
 *
 * At max, reo_wnd is incremented only once per rtt. So that the new
 * DSACK on which we are reacting, is due to the spurious retx (approx)
 * after the reo_wnd has been updated last time.
 *
 * reo_wnd is tracked in terms of steps (of min_rtt/4), rather than
 * absolute value to account for change in rtt.
 */
void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if ((READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
	     TCP_RACK_STATIC_REO_WND) ||
	    !rs->prior_delivered)
		return;

	/* Disregard DSACK if a rtt has not passed since we adjusted reo_wnd */
	if (before(rs->prior_delivered, tp->rack.last_delivered))
		tp->rack.dsack_seen = 0;

	/* Adjust the reo_wnd if update is pending */
	if (tp->rack.dsack_seen) {
		tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
					       tp->rack.reo_wnd_steps + 1);
		tp->rack.dsack_seen = 0;
		tp->rack.last_delivered = tp->delivered;
		tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
	} else if (!tp->rack.reo_wnd_persist) {
		tp->rack.reo_wnd_steps = 1;
	}
}

/* RFC6582 NewReno recovery for non-SACK connection. It simply retransmits
 * the next unacked packet upon receiving
 * a) three or more DUPACKs to start the fast recovery
 * b) an ACK acknowledging new data during the fast recovery.
 */
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced)
{
	const u8 state = inet_csk(sk)->icsk_ca_state;
	struct tcp_sock *tp = tcp_sk(sk);

	if ((state < TCP_CA_Recovery && tp->sacked_out >= tp->reordering) ||
	    (state == TCP_CA_Recovery && snd_una_advanced)) {
		struct sk_buff *skb = tcp_rtx_queue_head(sk);
		u32 mss;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
			return;

		mss = tcp_skb_mss(skb);
		if (tcp_skb_pcount(skb) > 1 && skb->len > mss)
			tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
				     mss, mss, GFP_ATOMIC);

		tcp_mark_skb_lost(sk, skb);
	}
}
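For reference, the time-based rule implemented by tcp_rack_skb_timeout() and tcp_rack_detect_loss() above can be sketched standalone as follows (plain C with hypothetical microsecond values, not kernel code):

/* Standalone sketch of RACK's time-based loss rule, using the same
 * quantities as tcp_rack_skb_timeout(). All values are illustrative.
 */
#include <stdio.h>

int main(void)
{
	long rack_rtt_us = 10000;	/* RTT of the most recently (s)acked packet */
	long reo_wnd_us  = 2500;	/* reordering window, roughly min_rtt/4 */
	long now_us      = 200000;	/* current timestamp (tp->tcp_mstamp) */
	long sent_us     = 185000;	/* send timestamp of the packet in question */

	/* Mirrors tcp_rack_skb_timeout(): time left before the packet is
	 * declared lost. <= 0 means "lost"; a positive remainder is used
	 * to arm the reordering timer.
	 */
	long remaining = rack_rtt_us + reo_wnd_us - (now_us - sent_us);

	if (remaining <= 0)
		printf("packet is marked lost\n");
	else
		printf("wait another %ld us before deciding\n", remaining);
	return 0;
}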