forked from Minki/linux
ipv4: Namespaceify tcp_fastopen_blackhole_timeout knob
Applications in different namespaces might require different time periods (in seconds) before Fastopen is disabled on active TCP sockets, so make the tcp_fastopen_blackhole_timeout knob per-namespace.

Tested: simulated the following situation, in which the server's data gets dropped after the 3WHS:

    C ---- syn-data ---> S
    C <--- syn/ack ----- S
    C ---- ack --------> S
    S (accept & write)
    C?  X <- data ------ S
          [retry and timeout]

Then checked the TCPFastOpenBlackhole counter in netstat; it increased as expected when the firewall blackhole issue was detected and active TFO was disabled.

    # cat /proc/net/netstat | awk '{print $91}'
    TCPFastOpenBlackhole
    1

Signed-off-by: Haishuang Yan <yanhaishuang@cmss.chinamobile.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
4371384856
commit
3733be14a3
@@ -133,6 +133,9 @@ struct netns_ipv4 {
|
||||
int sysctl_tcp_fastopen;
|
||||
struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
|
||||
spinlock_t tcp_fastopen_ctx_lock;
|
||||
unsigned int sysctl_tcp_fastopen_blackhole_timeout;
|
||||
atomic_t tfo_active_disable_times;
|
||||
unsigned long tfo_active_disable_stamp;
|
||||
|
||||
#ifdef CONFIG_NET_L3_MASTER_DEV
|
||||
int sysctl_udp_l3mdev_accept;
|
||||
|
@@ -355,11 +355,13 @@ static int proc_tfo_blackhole_detect_timeout(struct ctl_table *table,
|
||||
void __user *buffer,
|
||||
size_t *lenp, loff_t *ppos)
|
||||
{
|
||||
struct net *net = container_of(table->data, struct net,
|
||||
ipv4.sysctl_tcp_fastopen_blackhole_timeout);
|
||||
int ret;
|
||||
|
||||
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
|
||||
if (write && ret == 0)
|
||||
tcp_fastopen_active_timeout_reset();
|
||||
atomic_set(&net->ipv4.tfo_active_disable_times, 0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -397,14 +399,6 @@ static struct ctl_table ipv4_table[] = {
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec
|
||||
},
|
||||
{
|
||||
.procname = "tcp_fastopen_blackhole_timeout_sec",
|
||||
.data = &sysctl_tcp_fastopen_blackhole_timeout,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_tfo_blackhole_detect_timeout,
|
||||
.extra1 = &zero,
|
||||
},
|
||||
{
|
||||
.procname = "tcp_abort_on_overflow",
|
||||
.data = &sysctl_tcp_abort_on_overflow,
|
||||
@@ -1083,6 +1077,14 @@ static struct ctl_table ipv4_net_table[] = {
|
||||
.maxlen = ((TCP_FASTOPEN_KEY_LENGTH * 2) + 10),
|
||||
.proc_handler = proc_tcp_fastopen_key,
|
||||
},
|
||||
{
|
||||
.procname = "tcp_fastopen_blackhole_timeout_sec",
|
||||
.data = &init_net.ipv4.sysctl_tcp_fastopen_blackhole_timeout,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_tfo_blackhole_detect_timeout,
|
||||
.extra1 = &zero,
|
||||
},
|
||||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
|
||||
{
|
||||
.procname = "fib_multipath_use_neigh",
|
||||
|
@@ -422,25 +422,16 @@ EXPORT_SYMBOL(tcp_fastopen_defer_connect);
|
||||
* TFO connection with data exchanges.
|
||||
*/
|
||||
|
||||
/* Default to 1hr */
|
||||
unsigned int sysctl_tcp_fastopen_blackhole_timeout __read_mostly = 60 * 60;
|
||||
static atomic_t tfo_active_disable_times __read_mostly = ATOMIC_INIT(0);
|
||||
static unsigned long tfo_active_disable_stamp __read_mostly;
|
||||
|
||||
/* Disable active TFO and record current jiffies and
|
||||
* tfo_active_disable_times
|
||||
*/
|
||||
void tcp_fastopen_active_disable(struct sock *sk)
|
||||
{
|
||||
atomic_inc(&tfo_active_disable_times);
|
||||
tfo_active_disable_stamp = jiffies;
|
||||
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENBLACKHOLE);
|
||||
}
|
||||
struct net *net = sock_net(sk);
|
||||
|
||||
/* Reset tfo_active_disable_times to 0 */
|
||||
void tcp_fastopen_active_timeout_reset(void)
|
||||
{
|
||||
atomic_set(&tfo_active_disable_times, 0);
|
||||
atomic_inc(&net->ipv4.tfo_active_disable_times);
|
||||
net->ipv4.tfo_active_disable_stamp = jiffies;
|
||||
NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
|
||||
}
|
||||
|
||||
/* Calculate timeout for tfo active disable
|
||||
@@ -449,17 +440,18 @@ void tcp_fastopen_active_timeout_reset(void)
|
||||
*/
|
||||
bool tcp_fastopen_active_should_disable(struct sock *sk)
|
||||
{
|
||||
int tfo_da_times = atomic_read(&tfo_active_disable_times);
|
||||
int multiplier;
|
||||
unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout;
|
||||
int tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
|
||||
unsigned long timeout;
|
||||
int multiplier;
|
||||
|
||||
if (!tfo_da_times)
|
||||
return false;
|
||||
|
||||
/* Limit timout to max: 2^6 * initial timeout */
|
||||
multiplier = 1 << min(tfo_da_times - 1, 6);
|
||||
timeout = multiplier * sysctl_tcp_fastopen_blackhole_timeout * HZ;
|
||||
if (time_before(jiffies, tfo_active_disable_stamp + timeout))
|
||||
timeout = multiplier * tfo_bh_timeout * HZ;
|
||||
if (time_before(jiffies, sock_net(sk)->ipv4.tfo_active_disable_stamp + timeout))
|
||||
return true;
|
||||
|
||||
/* Mark check bit so we can check for successful active TFO
|
||||
@@ -495,10 +487,10 @@ void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
|
||||
}
|
||||
}
|
||||
} else if (tp->syn_fastopen_ch &&
|
||||
atomic_read(&tfo_active_disable_times)) {
|
||||
atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
|
||||
dst = sk_dst_get(sk);
|
||||
if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
|
||||
tcp_fastopen_active_timeout_reset();
|
||||
atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
|
||||
dst_release(dst);
|
||||
}
|
||||
}
|
||||
|
@@ -2474,6 +2474,8 @@ static int __net_init tcp_sk_init(struct net *net)
|
||||
|
||||
net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
|
||||
spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
|
||||
net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
|
||||
atomic_set(&net->ipv4.tfo_active_disable_times, 0);
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
|
Loading…
Reference in New Issue
Block a user