tcp: remove sk_{tr}x_skb_cache
This reverts the following patches:

- commit 2e05fcae83 ("tcp: fix compile error if !CONFIG_SYSCTL")
- commit 4f661542a4 ("tcp: fix zerocopy and notsent_lowat issues")
- commit 472c2e07ee ("tcp: add one skb cache for tx")
- commit 8b27dae5a2 ("tcp: add one skb cache for rx")

Having a cache of one skb (in each direction) per TCP socket is fragile, since it can cause a significant increase in memory needs, and it is not good enough for high-speed flows anyway, where more than one skb is needed.

We want instead to add a generic infrastructure, with more flexible per-cpu caches, for alien NUMA nodes.

Acked-by: Paolo Abeni <pabeni@redhat.com>
Acked-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
committed by David S. Miller
parent ff6fb083a0
commit d8b81175e4
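For readers skimming the diff: the mechanism being removed is a one-slot pointer cache per socket (sk_rx_skb_cache / sk_tx_skb_cache, gated by static keys) that parks a just-freed skb for later reuse instead of releasing it. The sketch below is a minimal user-space analogue of that pattern, not kernel code; all names in it (fake_sock, buf, sock_alloc_buf, sock_free_buf) are hypothetical and exist only to show why every idle socket can end up pinning one full buffer.

/* Minimal user-space analogue of the reverted one-skb-per-socket cache.
 * Assumption: names and sizes here are illustrative, not from the kernel. */
#include <stdio.h>
#include <stdlib.h>

struct buf {
	char data[2048];		/* stands in for a full skb's memory */
};

struct fake_sock {
	struct buf *tx_buf_cache;	/* one-slot cache, like sk_tx_skb_cache */
};

/* "Freeing" parks the buffer in the per-socket slot if it is empty. */
static void sock_free_buf(struct fake_sock *sk, struct buf *b)
{
	if (!sk->tx_buf_cache) {
		sk->tx_buf_cache = b;
		return;
	}
	free(b);
}

/* Allocation reuses the parked buffer when one is available. */
static struct buf *sock_alloc_buf(struct fake_sock *sk)
{
	struct buf *b = sk->tx_buf_cache;

	if (b) {
		sk->tx_buf_cache = NULL;
		return b;
	}
	return malloc(sizeof(*b));
}

int main(void)
{
	struct fake_sock sk = { 0 };
	struct buf *b = sock_alloc_buf(&sk);

	sock_free_buf(&sk, b);	/* parked in the socket, not released */
	printf("idle socket still holds %zu bytes\n", sizeof(*sk.tx_buf_cache));

	free(sk.tx_buf_cache);	/* only reclaimed when the socket lets go */
	return 0;
}

Multiplied across many mostly idle sockets, that parked buffer is the memory-footprint concern the changelog cites, and it still does not help a fast flow that needs several in-flight skbs; the stated plan is to replace it with more flexible per-cpu caches.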
@@ -262,7 +262,6 @@ struct bpf_local_storage;
  *	@sk_dst_cache: destination cache
  *	@sk_dst_pending_confirm: need to confirm neighbour
  *	@sk_policy: flow policy
- *	@sk_rx_skb_cache: cache copy of recently accessed RX skb
  *	@sk_receive_queue: incoming packets
  *	@sk_wmem_alloc: transmit queue bytes committed
  *	@sk_tsq_flags: TCP Small Queues flags
@@ -328,7 +327,6 @@ struct bpf_local_storage;
  *	@sk_peek_off: current peek_offset value
  *	@sk_send_head: front of stuff to transmit
  *	@tcp_rtx_queue: TCP re-transmit queue [union with @sk_send_head]
- *	@sk_tx_skb_cache: cache copy of recently accessed TX skb
  *	@sk_security: used by security modules
  *	@sk_mark: generic packet mark
  *	@sk_cgrp_data: cgroup data for this cgroup
@@ -393,7 +391,6 @@ struct sock {
 	atomic_t		sk_drops;
 	int			sk_rcvlowat;
 	struct sk_buff_head	sk_error_queue;
-	struct sk_buff		*sk_rx_skb_cache;
 	struct sk_buff_head	sk_receive_queue;
 	/*
 	 * The backlog queue is special, it is always used with
@@ -442,7 +439,6 @@ struct sock {
 		struct sk_buff	*sk_send_head;
 		struct rb_root	tcp_rtx_queue;
 	};
-	struct sk_buff		*sk_tx_skb_cache;
 	struct sk_buff_head	sk_write_queue;
 	__s32			sk_peek_off;
 	int			sk_write_pending;
@@ -1555,18 +1551,10 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
 		__sk_mem_reclaim(sk, 1 << 20);
 }
 
-DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
 static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
 {
 	sk_wmem_queued_add(sk, -skb->truesize);
 	sk_mem_uncharge(sk, skb->truesize);
-	if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
-	    !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
-		skb_ext_reset(skb);
-		skb_zcopy_clear(skb, true);
-		sk->sk_tx_skb_cache = skb;
-		return;
-	}
 	__kfree_skb(skb);
 }
 
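As a reading aid only, reconstructed from the context lines of the hunk above: with the static key and the caching branch gone, the write-side helper is reduced to a plain uncharge-and-free.

static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sk_wmem_queued_add(sk, -skb->truesize);
	sk_mem_uncharge(sk, skb->truesize);
	__kfree_skb(skb);
}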
@@ -2575,7 +2563,6 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
 			   &skb_shinfo(skb)->tskey);
 }
 
-DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
 /**
  * sk_eat_skb - Release a skb if it is no longer needed
  * @sk: socket to eat this skb from
@@ -2587,12 +2574,6 @@ DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
 static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
 {
 	__skb_unlink(skb, &sk->sk_receive_queue);
-	if (static_branch_unlikely(&tcp_rx_skb_cache_key) &&
-	    !sk->sk_rx_skb_cache) {
-		sk->sk_rx_skb_cache = skb;
-		skb_orphan(skb);
-		return;
-	}
 	__kfree_skb(skb);
 }
 
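Likewise, reading only the surviving context lines of the last hunk, the receive-side helper goes back to unlink-then-free:

static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}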