#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <net/inetpeer.h>
#include <net/tcp.h>
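/* Bitmap of TFO_* mode flags; only client-side Fast Open is enabled
 * by default.
 */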
int sysctl_tcp_fastopen __read_mostly = TFO_CLIENT_ENABLE;
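/* Current cipher context used to generate Fast Open cookies. Replaced
 * by tcp_fastopen_reset_cipher() and read under RCU.
 */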
struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
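/* Serializes writers of tcp_fastopen_ctx. */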
static DEFINE_SPINLOCK(tcp_fastopen_ctx_lock);
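/* Generate the system-wide Fast Open key once, from random bytes.
 * If @publish is true and the key was just created, install it as the
 * current cookie-generation context.
 */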
void tcp_fastopen_init_key_once(bool publish)
{
        static u8 key[TCP_FASTOPEN_KEY_LENGTH];

        /* tcp_fastopen_reset_cipher publishes the new context
         * atomically, so we allow this race to happen here.
         *
         * All call sites of tcp_fastopen_cookie_gen also check
         * for a valid cookie, so this is an acceptable risk.
         */
        if (net_get_random_once(key, sizeof(key)) && publish)
                tcp_fastopen_reset_cipher(key, sizeof(key));
}
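/* RCU callback: free a retired context (and its cipher handle) once no
 * reader can still reference it.
 */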
static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
        struct tcp_fastopen_context *ctx =
                container_of(head, struct tcp_fastopen_context, rcu);

        crypto_free_cipher(ctx->tfm);
        kfree(ctx);
}
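/* Install a new AES cipher context keyed with @key for cookie generation.
 * The old context, if any, is swapped out under tcp_fastopen_ctx_lock and
 * freed after an RCU grace period.
 */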
int tcp_fastopen_reset_cipher(void *key, unsigned int len)
{
        int err;
        struct tcp_fastopen_context *ctx, *octx;

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        ctx->tfm = crypto_alloc_cipher("aes", 0, 0);

        if (IS_ERR(ctx->tfm)) {
                err = PTR_ERR(ctx->tfm);
error:          kfree(ctx);
                pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
                return err;
        }
        err = crypto_cipher_setkey(ctx->tfm, key, len);
        if (err) {
                pr_err("TCP: TFO cipher key error: %d\n", err);
                crypto_free_cipher(ctx->tfm);
                goto error;
        }
        memcpy(ctx->key, key, len);

        spin_lock(&tcp_fastopen_ctx_lock);

        octx = rcu_dereference_protected(tcp_fastopen_ctx,
                        lockdep_is_held(&tcp_fastopen_ctx_lock));
        rcu_assign_pointer(tcp_fastopen_ctx, ctx);
        spin_unlock(&tcp_fastopen_ctx_lock);

        if (octx)
                call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
        return err;
}
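/* Encrypt a single AES block (@path) with the current context to produce
 * a cookie. Returns false if no context has been installed yet.
 */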
static bool __tcp_fastopen_cookie_gen(const void *path,
                                      struct tcp_fastopen_cookie *foc)
{
        struct tcp_fastopen_context *ctx;
        bool ok = false;

        rcu_read_lock();
        ctx = rcu_dereference(tcp_fastopen_ctx);
        if (ctx) {
                crypto_cipher_encrypt_one(ctx->tfm, foc->val, path);
                foc->len = TCP_FASTOPEN_COOKIE_SIZE;
                ok = true;
        }
        rcu_read_unlock();
        return ok;
}
/* Generate the fastopen cookie by applying AES-128 encryption to the
 * source and destination addresses. Pad with 0s for IPv4 or
 * IPv4-mapped-IPv6 addresses. For the longer IPv6 addresses, use CBC-MAC.
 *
 * XXX (TFO) - refactor when TCP_FASTOPEN_COOKIE_SIZE != AES_BLOCK_SIZE.
 */
static bool tcp_fastopen_cookie_gen(struct request_sock *req,
                                    struct sk_buff *syn,
                                    struct tcp_fastopen_cookie *foc)
{
        if (req->rsk_ops->family == AF_INET) {
                const struct iphdr *iph = ip_hdr(syn);

                __be32 path[4] = { iph->saddr, iph->daddr, 0, 0 };

                return __tcp_fastopen_cookie_gen(path, foc);
        }

#if IS_ENABLED(CONFIG_IPV6)
        if (req->rsk_ops->family == AF_INET6) {
                const struct ipv6hdr *ip6h = ipv6_hdr(syn);
                struct tcp_fastopen_cookie tmp;

                if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) {
                        struct in6_addr *buf = (struct in6_addr *) tmp.val;
                        int i;

                        for (i = 0; i < 4; i++)
                                buf->s6_addr32[i] ^= ip6h->daddr.s6_addr32[i];

                        return __tcp_fastopen_cookie_gen(buf, foc);
                }
        }
#endif
        return false;
}
/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
                return;

        skb = skb_clone(skb, GFP_ATOMIC);
        if (!skb)
                return;

        skb_dst_drop(skb);
        /* segs_in has been initialized to 1 in tcp_create_openreq_child().
         * Hence, reset segs_in to 0 before calling tcp_segs_in()
         * to avoid double counting. Also, tcp_segs_in() expects
         * skb->len to include the tcp_hdrlen. Hence, it should
         * be called before __skb_pull().
         */
        tp->segs_in = 0;
        tcp_segs_in(tp, skb);
        __skb_pull(skb, tcp_hdrlen(skb));
        skb_set_owner_r(skb, sk);
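        /* The SYN consumes one sequence number; advance past it and clear
         * the SYN flag so the skb now covers only the payload / FIN.
         */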
        TCP_SKB_CB(skb)->seq++;
        TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

        tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
        __skb_queue_tail(&sk->sk_receive_queue, skb);
        tp->syn_data_acked = 1;

        /* u64_stats_update_begin(&tp->syncp) not needed here,
         * as we certainly are not changing upper 32bit value (0)
         */
        tp->bytes_received = skb->len;

        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
                tcp_fin(sk);
}
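/* Create a full (non-request) child socket directly from the SYN, so
 * that any data carried in the SYN can be accepted before the three-way
 * handshake completes.
 */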
static struct sock *tcp_fastopen_create_child(struct sock *sk,
                                              struct sk_buff *skb,
                                              struct dst_entry *dst,
                                              struct request_sock *req)
{
        struct tcp_sock *tp;
        struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
        struct sock *child;
        bool own_req;

        req->num_retrans = 0;
        req->num_timeout = 0;
        req->sk = NULL;

        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
                                                         NULL, &own_req);
        if (!child)
                return NULL;

        spin_lock(&queue->fastopenq.lock);
        queue->fastopenq.qlen++;
        spin_unlock(&queue->fastopenq.lock);

        /* Initialize the child socket. Have to fix some values to take
         * into account the child is a Fast Open socket and is created
         * only out of the bits carried in the SYN packet.
         */
        tp = tcp_sk(child);

        tp->fastopen_rsk = req;
        tcp_rsk(req)->tfo_listener = true;

        /* RFC1323: The window in SYN & SYN/ACK segments is never
         * scaled. So correct it appropriately.
         */
        tp->snd_wnd = ntohs(tcp_hdr(skb)->window);

        /* Activate the retrans timer so that SYNACK can be retransmitted.
         * The request socket is not added to the ehash
         * because it's been added to the accept queue directly.
         */
        inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
                                  TCP_TIMEOUT_INIT, TCP_RTO_MAX);
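        /* Two references on the request: one held through tp->fastopen_rsk
         * above, one for the caller, which queues the request into the
         * listener's accept queue.
         */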
        atomic_set(&req->rsk_refcnt, 2);

        /* Now finish processing the fastopen child socket. */
        inet_csk(child)->icsk_af_ops->rebuild_header(child);
        tcp_init_congestion_control(child);
        tcp_mtup_init(child);
        tcp_init_metrics(child);
        tcp_init_buffer_space(child);

        tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

        tcp_fastopen_add_skb(child, skb);

        tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
        /* tcp_conn_request() is sending the SYNACK,
         * and queues the child into the listener's accept queue.
         */
        return child;
}
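/* Decide whether this listener may take another pending TFO request.
 * When the queue is full, reclaim a slot from the oldest request that
 * has already been hit by a RST and whose SYNACK timer has expired.
 */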
static bool tcp_fastopen_queue_check(struct sock *sk)
{
        struct fastopen_queue *fastopenq;

        /* Make sure the listener has enabled fastopen, and we don't
         * exceed the max # of pending TFO requests allowed before trying
         * to validate the cookie, in order to avoid burning CPU cycles
         * unnecessarily.
         *
         * XXX (TFO) - The implication of checking the max_qlen before
         * processing a cookie request is that clients can't differentiate
         * between qlen overflow causing Fast Open to be disabled
         * temporarily vs a server not supporting Fast Open at all.
         */
        fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
        if (fastopenq->max_qlen == 0)
                return false;

        if (fastopenq->qlen >= fastopenq->max_qlen) {
                struct request_sock *req1;

                spin_lock(&fastopenq->lock);
                req1 = fastopenq->rskq_rst_head;
                if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
                        __NET_INC_STATS(sock_net(sk),
                                        LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
                        spin_unlock(&fastopenq->lock);
                        return false;
                }
                fastopenq->rskq_rst_head = req1->dl_next;
                fastopenq->qlen--;
                spin_unlock(&fastopenq->lock);
                reqsk_put(req1);
        }
        return true;
}
/* Returns the child socket if we should perform Fast Open on the SYN,
 * otherwise NULL. The cookie (foc) may be updated and returned to the
 * client in the SYN-ACK later, e.g. for a Fast Open cookie request
 * (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
                              struct request_sock *req,
                              struct tcp_fastopen_cookie *foc,
                              struct dst_entry *dst)
{
        struct tcp_fastopen_cookie valid_foc = { .len = -1 };
        bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
        struct sock *child;

        if (foc->len == 0) /* Client requests a cookie */
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

        if (!((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) &&
              (syn_data || foc->len >= 0) &&
              tcp_fastopen_queue_check(sk))) {
                foc->len = -1;
                return NULL;
        }
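        /* A listener in TFO_SERVER_COOKIE_NOT_REQD mode accepts data in
         * the SYN without validating a cookie.
         */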
        if (syn_data && (sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD))
                goto fastopen;

        if (foc->len >= 0 &&  /* Client presents or requests a cookie */
            tcp_fastopen_cookie_gen(req, skb, &valid_foc) &&
            foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
            foc->len == valid_foc.len &&
            !memcmp(foc->val, valid_foc.val, foc->len)) {
                /* Cookie is valid. Create a (full) child socket to accept
                 * the data in SYN before returning a SYN-ACK to ack the
                 * data. If we fail to create the socket, fall back and
                 * ack the ISN only but include the same cookie.
                 *
                 * Note: Data-less SYN with valid cookie is allowed to send
                 * data in SYN_RECV state.
                 */
fastopen:
                child = tcp_fastopen_create_child(sk, skb, dst, req);
                if (child) {
                        foc->len = -1;
                        NET_INC_STATS(sock_net(sk),
                                      LINUX_MIB_TCPFASTOPENPASSIVE);
                        return child;
                }
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
        } else if (foc->len > 0) /* Client presents an invalid cookie */
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);

        valid_foc.exp = foc->exp;
        *foc = valid_foc;
        return NULL;
}