Merge branch 'tcp-mem-pressure-vs-SO_RCVLOWAT'
Eric Dumazet says:

====================
tcp: mem pressure vs SO_RCVLOWAT

First patch fixes an issue for applications using SO_RCVLOWAT
to reduce context switches.

Second patch is a cleanup.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 762d17b991
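For context, SO_RCVLOWAT lets a receiver ask the kernel not to report the socket readable until a given number of bytes are queued, so bulk readers take fewer wakeups and context switches. A minimal userspace sketch of that pattern follows (illustrative only, not part of this commit; the 64KB watermark and the read_bulk() helper are invented for the example):

/* Sketch: raise SO_RCVLOWAT so poll() wakes us roughly once per 64KB
 * instead of once per segment. The first patch in this series keeps
 * such a reader from hanging when TCP memory pressure means the full
 * watermark may never be queued.
 */
#include <poll.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

static ssize_t read_bulk(int fd, char *buf, size_t len)
{
        int lowat = 64 * 1024;  /* do not wake us for less than 64KB */
        struct pollfd pfd = { .fd = fd, .events = POLLIN };

        if (setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat)) < 0)
                perror("setsockopt(SO_RCVLOWAT)");

        if (poll(&pfd, 1, -1) < 0)      /* one wakeup per watermark's worth */
                return -1;
        return read(fd, buf, len);
}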
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1431,12 +1431,29 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied);
  */
 static inline bool tcp_rmem_pressure(const struct sock *sk)
 {
-        int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
-        int threshold = rcvbuf - (rcvbuf >> 3);
+        int rcvbuf, threshold;
+
+        if (tcp_under_memory_pressure(sk))
+                return true;
+
+        rcvbuf = READ_ONCE(sk->sk_rcvbuf);
+        threshold = rcvbuf - (rcvbuf >> 3);
 
         return atomic_read(&sk->sk_rmem_alloc) > threshold;
 }
 
+static inline bool tcp_epollin_ready(const struct sock *sk, int target)
+{
+        const struct tcp_sock *tp = tcp_sk(sk);
+        int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);
+
+        if (avail <= 0)
+                return false;
+
+        return (avail >= target) || tcp_rmem_pressure(sk) ||
+               (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss);
+}
+
 extern void tcp_openreq_init_rwin(struct request_sock *req,
                                   const struct sock *sk_listener,
                                   const struct dst_entry *dst);
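Note that the local pressure test above deliberately avoids a division: rcvbuf - (rcvbuf >> 3) is 7/8 of sk_rcvbuf, so the socket counts as under pressure once more than 87.5% of its receive budget is allocated. A standalone check of that arithmetic (illustrative only):

#include <assert.h>

int main(void)
{
        int rcvbuf = 128 * 1024;                /* example sk_rcvbuf */
        int threshold = rcvbuf - (rcvbuf >> 3); /* == rcvbuf * 7 / 8 */

        assert(threshold == 114688);            /* 112KB, i.e. 87.5% */
        return 0;
}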
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -481,19 +481,11 @@ static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
         }
 }
 
-static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
-                                          int target, struct sock *sk)
+static bool tcp_stream_is_readable(struct sock *sk, int target)
 {
-        int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);
-
-        if (avail > 0) {
-                if (avail >= target)
-                        return true;
-                if (tcp_rmem_pressure(sk))
-                        return true;
-                if (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss)
-                        return true;
-        }
+        if (tcp_epollin_ready(sk, target))
+                return true;
+
         if (sk->sk_prot->stream_memory_read)
                 return sk->sk_prot->stream_memory_read(sk);
         return false;
@@ -568,7 +560,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
                     tp->urg_data)
                         target++;
 
-                if (tcp_stream_is_readable(tp, target, sk))
+                if (tcp_stream_is_readable(sk, target))
                         mask |= EPOLLIN | EPOLLRDNORM;
 
                 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4924,15 +4924,8 @@ err:
 
 void tcp_data_ready(struct sock *sk)
 {
-        const struct tcp_sock *tp = tcp_sk(sk);
-        int avail = tp->rcv_nxt - tp->copied_seq;
-
-        if (avail < sk->sk_rcvlowat && !tcp_rmem_pressure(sk) &&
-            !sock_flag(sk, SOCK_DONE) &&
-            tcp_receive_window(tp) > inet_csk(sk)->icsk_ack.rcv_mss)
-                return;
-
-        sk->sk_data_ready(sk);
+        if (tcp_epollin_ready(sk, sk->sk_rcvlowat))
+                sk->sk_data_ready(sk);
 }
 
 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
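After the cleanup, tcp_data_ready() and tcp_poll() evaluate the same tcp_epollin_ready() test, so a wakeup is only issued when a subsequent poll would actually report EPOLLIN: enough queued bytes for the SO_RCVLOWAT target, TCP memory pressure, or a receive window at or below one rcv_mss. A hypothetical epoll-based receiver relying on that behavior (wait_readable() is invented for this sketch; error handling trimmed):

/* Sketch: because the wakeup (tcp_data_ready) and the readiness report
 * (tcp_poll) now share one test, this call should not return only for
 * read() to then find less than the watermark.
 */
#include <sys/epoll.h>
#include <unistd.h>

static int wait_readable(int epfd, int fd)
{
        struct epoll_event ev = { .events = EPOLLIN, .data.fd = fd };

        if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) < 0)
                return -1;
        return epoll_wait(epfd, &ev, 1, -1);    /* blocks until truly ready */
}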