forked from Minki/linux
tcp: minor optimization in tcp ack fast path processing
Bitwise operation is a little faster. So I replace after() with using the flag FLAG_SND_UNA_ADVANCED as it is already set before. In addition, there's another similar improvement in tcp_cwnd_reduction(). Cc: Joe Perches <joe@perches.com> Suggested-by: Eric Dumazet <edumazet@google.com> Signed-off-by: Yafang Shao <laoar.shao@gmail.com> Signed-off-by: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
0cf3a68a53
commit
5e13a0d3f5
@@ -2457,8 +2457,8 @@ void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag)
|
||||
u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
|
||||
tp->prior_cwnd - 1;
|
||||
sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
|
||||
} else if ((flag & FLAG_RETRANS_DATA_ACKED) &&
|
||||
!(flag & FLAG_LOST_RETRANS)) {
|
||||
} else if ((flag & (FLAG_RETRANS_DATA_ACKED | FLAG_LOST_RETRANS)) ==
|
||||
FLAG_RETRANS_DATA_ACKED) {
|
||||
sndcnt = min_t(int, delta,
|
||||
max_t(int, tp->prr_delivered - tp->prr_out,
|
||||
newly_acked_sacked) + 1);
|
||||
@@ -3610,7 +3610,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
|
||||
if (flag & FLAG_UPDATE_TS_RECENT)
|
||||
tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
|
||||
|
||||
if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
|
||||
if ((flag & (FLAG_SLOWPATH | FLAG_SND_UNA_ADVANCED)) ==
|
||||
FLAG_SND_UNA_ADVANCED) {
|
||||
/* Window is constant, pure forward advance.
|
||||
* No more checks are required.
|
||||
* Note, we use the fact that SND.UNA>=SND.WL2.
|
||||
|
Loading…
Reference in New Issue
Block a user