Mirror of https://github.com/torvalds/linux.git (synced 2024-11-27 06:31:52 +00:00)
Merge branch 'listener-refactoring-preparations'
Eric Dumazet says:

====================
tcp: listener refactoring preparations

This patch series makes changes to the TCP and DCCP stacks so that we
can switch the listener code to lockless mode. This is done by marking
the listener socket const in all appropriate paths. The FastOpen code
had to be changed to no longer allocate a very small structure
dynamically, which keeps the code simpler for the follow-up changes.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in: commit e6934f3ec0
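Before the diff itself, it helps to see the shape of the FastOpen change the cover letter describes. struct request_sock_queue used to reach its Fast Open state through a lazily kzalloc()ed pointer; after this series it embeds the structure directly. The following standalone sketch (toy types and names, not kernel code) contrasts the two patterns and shows why the tuning helper stops being able to fail:

#include <stdlib.h>

struct fastopen_queue_sketch {
	int max_qlen;
};

/* Before: pointer member, allocated on first use. Every caller must
 * handle both the NULL pointer and the allocation-failure path.
 */
struct accept_queue_before {
	struct fastopen_queue_sketch *fastopenq;
};

static int tune_before(struct accept_queue_before *q, int backlog)
{
	if (!q->fastopenq) {
		q->fastopenq = calloc(1, sizeof(*q->fastopenq));
		if (!q->fastopenq)
			return -1;	/* the -ENOMEM path */
	}
	q->fastopenq->max_qlen = backlog;
	return 0;
}

/* After: embedded member, initialized together with its parent. Tuning
 * cannot fail, and "is TFO enabled?" becomes a max_qlen != 0 test.
 */
struct accept_queue_after {
	struct fastopen_queue_sketch fastopenq;
};

static void tune_after(struct accept_queue_after *q, int backlog)
{
	q->fastopenq.max_qlen = backlog;
}

int main(void)
{
	struct accept_queue_before qb = { 0 };
	struct accept_queue_after qa = { { 0 } };

	(void)tune_before(&qb, 128);
	tune_after(&qa, 128);
	free(qb.fastopenq);
	return 0;
}

The first hunk below is the kernel-side version of exactly this trade: fastopen_init_queue(), which could return -ENOMEM, becomes the void fastopen_queue_tune().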
@@ -382,25 +382,11 @@ static inline bool tcp_passive_fastopen(const struct sock *sk)
 	       tcp_sk(sk)->fastopen_rsk != NULL);
 }
 
-extern void tcp_sock_destruct(struct sock *sk);
-
-static inline int fastopen_init_queue(struct sock *sk, int backlog)
+static inline void fastopen_queue_tune(struct sock *sk, int backlog)
 {
-	struct request_sock_queue *queue =
-			&inet_csk(sk)->icsk_accept_queue;
+	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
 
-	if (queue->fastopenq == NULL) {
-		queue->fastopenq = kzalloc(
-		    sizeof(struct fastopen_queue),
-		    sk->sk_allocation);
-		if (queue->fastopenq == NULL)
-			return -ENOMEM;
-
-		sk->sk_destruct = tcp_sock_destruct;
-		spin_lock_init(&queue->fastopenq->lock);
-	}
-	queue->fastopenq->max_qlen = backlog;
-	return 0;
+	queue->fastopenq.max_qlen = backlog;
 }
 
 static inline void tcp_saved_syn_free(struct tcp_sock *tp)
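Most of the remaining hunks are one kind of signature change: helpers that run on behalf of a listener now take const struct sock *. Once SYN processing may run without the listener lock, const-qualifying the shared socket lets the compiler reject accidental writes to it. A toy illustration (not kernel APIs) of the diagnostic this buys:

struct listener_sketch {
	int backlog;
	unsigned int hash_seed;
};

/* A read-only helper compiles fine against a const listener. */
static unsigned int seed_of(const struct listener_sketch *l)
{
	return l->hash_seed;
}

/* A body such as "l->backlog = 0;" in a function taking a const
 * pointer would now fail to compile: assignment of member in
 * read-only object. That compile-time check is what the const
 * churn in the hunks below buys.
 */
int main(void)
{
	struct listener_sketch l = { 10, 42u };

	return (int)seed_of(&l) - 42;
}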
@@ -26,7 +26,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
 			    const struct inet_bind_bucket *tb, bool relax);
 
 struct dst_entry *inet6_csk_route_req(const struct sock *sk, struct flowi6 *fl6,
-				      const struct request_sock *req);
+				      const struct request_sock *req, u8 proto);
 
 struct request_sock *inet6_csk_search_req(struct sock *sk,
 					  const __be16 rport,
@@ -41,7 +41,7 @@ struct inet_connection_sock_af_ops {
 	int	    (*rebuild_header)(struct sock *sk);
 	void	    (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
 	int	    (*conn_request)(struct sock *sk, struct sk_buff *skb);
-	struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb,
+	struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb,
 				      struct request_sock *req,
 				      struct dst_entry *dst);
 	u16	    net_header_len;
@@ -268,7 +268,8 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum);
 
 struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4,
 				     const struct request_sock *req);
-struct dst_entry *inet_csk_route_child_sock(struct sock *sk, struct sock *newsk,
+struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
+					    struct sock *newsk,
 					    const struct request_sock *req);
 
 static inline void inet_csk_reqsk_queue_add(struct sock *sk,
@@ -199,7 +199,7 @@ static inline int inet_sk_listen_hashfn(const struct sock *sk)
 }
 
 /* Caller must disable local BH processing. */
-int __inet_inherit_port(struct sock *sk, struct sock *child);
+int __inet_inherit_port(const struct sock *sk, struct sock *child);
 
 void inet_put_port(struct sock *sk);
 
@@ -34,9 +34,9 @@ struct request_sock_ops {
 	char		*slab_name;
 	int		(*rtx_syn_ack)(const struct sock *sk,
 				       struct request_sock *req);
-	void		(*send_ack)(struct sock *sk, struct sk_buff *skb,
+	void		(*send_ack)(const struct sock *sk, struct sk_buff *skb,
 				    struct request_sock *req);
-	void		(*send_reset)(struct sock *sk,
+	void		(*send_reset)(const struct sock *sk,
 				      struct sk_buff *skb);
 	void		(*destructor)(struct request_sock *req);
 	void		(*syn_ack_timeout)(const struct request_sock *req);
@@ -129,9 +129,8 @@ struct listen_sock {
 	atomic_t		qlen_dec; /* qlen = qlen_inc - qlen_dec */
 	atomic_t		young_dec;
 
-	u8			max_qlen_log ____cacheline_aligned_in_smp;
-	u8			synflood_warned;
-	/* 2 bytes hole, try to use */
+	u32			max_qlen_log ____cacheline_aligned_in_smp;
+	u32			synflood_warned;
 	u32			hash_rnd;
 	u32			nr_table_entries;
 	struct request_sock	*syn_table[0];
@@ -181,11 +180,8 @@ struct request_sock_queue {
 	struct request_sock	*rskq_accept_tail;
 	u8			rskq_defer_accept;
 	struct listen_sock	*listen_opt;
-	struct fastopen_queue	*fastopenq; /* This is non-NULL iff TFO has been
-					     * enabled on this listener. Check
-					     * max_qlen != 0 in fastopen_queue
-					     * to determine if TFO is enabled
-					     * right at this moment.
+	struct fastopen_queue	fastopenq;  /* Check max_qlen != 0 to determine
+					     * if TFO is enabled.
 					     */
 
 	/* temporary alignment, our goal is to get rid of this lock */
@@ -759,7 +759,7 @@ static inline int sk_memalloc_socks(void)
 
 #endif
 
-static inline gfp_t sk_gfp_atomic(struct sock *sk, gfp_t gfp_mask)
+static inline gfp_t sk_gfp_atomic(const struct sock *sk, gfp_t gfp_mask)
 {
 	return GFP_ATOMIC | (sk->sk_allocation & __GFP_MEMALLOC);
 }
@@ -365,8 +365,7 @@ void tcp_wfree(struct sk_buff *skb);
 void tcp_write_timer_handler(struct sock *sk);
 void tcp_delack_timer_handler(struct sock *sk);
 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
-int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
-			  const struct tcphdr *th, unsigned int len);
+int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			 const struct tcphdr *th, unsigned int len);
 void tcp_rcv_space_adjust(struct sock *sk);
@@ -451,11 +450,11 @@ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 void tcp_v4_mtu_reduced(struct sock *sk);
 void tcp_req_err(struct sock *sk, u32 seq);
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
-struct sock *tcp_create_openreq_child(struct sock *sk,
+struct sock *tcp_create_openreq_child(const struct sock *sk,
 				      struct request_sock *req,
 				      struct sk_buff *skb);
 void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
-struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 				  struct request_sock *req,
 				  struct dst_entry *dst);
 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
@@ -492,8 +491,9 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
 
 /* syncookies: remember time of last synqueue overflow
  * But do not dirty this field too often (once per second is enough)
+ * It is racy as we do not hold a lock, but race is very minor.
  */
-static inline void tcp_synq_overflow(struct sock *sk)
+static inline void tcp_synq_overflow(const struct sock *sk)
 {
 	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
 	unsigned long now = jiffies;
@@ -520,8 +520,7 @@ static inline u32 tcp_cookie_time(void)
 
 u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
 			      u16 *mssp);
-__u32 cookie_v4_init_sequence(struct sock *sk, const struct sk_buff *skb,
-			      __u16 *mss);
+__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
 __u32 cookie_init_timestamp(struct request_sock *req);
 bool cookie_timestamp_decode(struct tcp_options_received *opt);
 bool cookie_ecn_ok(const struct tcp_options_received *opt,
@@ -534,8 +533,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
 
 u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
 			      const struct tcphdr *th, u16 *mssp);
-__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
-			      __u16 *mss);
+__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
 #endif
 /* tcp_output.c */
 
@@ -1710,10 +1708,10 @@ struct tcp_request_sock_ops {
 					  const struct sock *sk_listener,
 					  struct sk_buff *skb);
 #ifdef CONFIG_SYN_COOKIES
-	__u32 (*cookie_init_seq)(struct sock *sk, const struct sk_buff *skb,
+	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
 				 __u16 *mss);
 #endif
-	struct dst_entry *(*route_req)(struct sock *sk, struct flowi *fl,
+	struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
 				       const struct request_sock *req,
 				       bool *strict);
 	__u32 (*init_seq)(const struct sk_buff *skb);
@@ -1726,14 +1724,16 @@ struct tcp_request_sock_ops {
 
 #ifdef CONFIG_SYN_COOKIES
 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
-					 struct sock *sk, struct sk_buff *skb,
+					 const struct sock *sk, struct sk_buff *skb,
 					 __u16 *mss)
 {
-	return ops->cookie_init_seq(sk, skb, mss);
+	tcp_synq_overflow(sk);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
+	return ops->cookie_init_seq(skb, mss);
 }
 #else
 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
-					 struct sock *sk, struct sk_buff *skb,
+					 const struct sock *sk, struct sk_buff *skb,
 					 __u16 *mss)
 {
 	return 0;
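The cookie_init_sequence() hunk above changes more than constness: the side effects both address families performed in their cookie callbacks (stamping the overflow time via tcp_synq_overflow(), bumping LINUX_MIB_SYNCOOKIESSENT) move into the shared inline wrapper, leaving the per-family callbacks pure functions of the packet. The matching deletions show up later in the cookie_v4_init_sequence() and cookie_v6_init_sequence() hunks. A minimal sketch of the shape, with stand-in names:

#include <stdio.h>

struct pkt_sketch {
	unsigned short mss;
};

/* Per-family callback: after the change it needs only the packet. */
typedef unsigned int (*cookie_seq_fn)(const struct pkt_sketch *p,
				      unsigned short *mss);

static unsigned int v4_cookie_seq(const struct pkt_sketch *p,
				  unsigned short *mss)
{
	*mss = p->mss;
	return 42u;		/* stand-in for the real cookie hash */
}

static int overflow_marks, cookies_sent;	/* stand-in shared state */

/* Shared wrapper: the bookkeeping now happens once, for every family. */
static unsigned int cookie_init_sequence(cookie_seq_fn fn,
					 const struct pkt_sketch *p,
					 unsigned short *mss)
{
	overflow_marks++;	/* cf. tcp_synq_overflow(sk) */
	cookies_sent++;		/* cf. LINUX_MIB_SYNCOOKIESSENT */
	return fn(p, mss);
}

int main(void)
{
	struct pkt_sketch p = { 1460 };
	unsigned short mss;
	unsigned int isn = cookie_init_sequence(v4_cookie_seq, &p, &mss);

	printf("isn=%u mss=%hu sent=%d\n", isn, mss, cookies_sent);
	return 0;
}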
@@ -59,6 +59,13 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 
 	get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
 	spin_lock_init(&queue->syn_wait_lock);
+
+	spin_lock_init(&queue->fastopenq.lock);
+	queue->fastopenq.rskq_rst_head = NULL;
+	queue->fastopenq.rskq_rst_tail = NULL;
+	queue->fastopenq.qlen = 0;
+	queue->fastopenq.max_qlen = 0;
+
 	queue->rskq_accept_head = NULL;
 	lopt->nr_table_entries = nr_table_entries;
 	lopt->max_qlen_log = ilog2(nr_table_entries);
@@ -174,7 +181,7 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
 	struct sock *lsk = req->rsk_listener;
 	struct fastopen_queue *fastopenq;
 
-	fastopenq = inet_csk(lsk)->icsk_accept_queue.fastopenq;
+	fastopenq = &inet_csk(lsk)->icsk_accept_queue.fastopenq;
 
 	tcp_sk(sk)->fastopen_rsk = NULL;
 	spin_lock_bh(&fastopenq->lock);
@@ -229,7 +229,7 @@ void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 int dccp_retransmit_skb(struct sock *sk);
 
 void dccp_send_ack(struct sock *sk);
-void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+void dccp_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 			 struct request_sock *rsk);
 
 void dccp_send_sync(struct sock *sk, const u64 seq,
@@ -270,13 +270,13 @@ int dccp_reqsk_init(struct request_sock *rq, struct dccp_sock const *dp,
 
 int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 
-struct sock *dccp_create_openreq_child(struct sock *sk,
+struct sock *dccp_create_openreq_child(const struct sock *sk,
 				       const struct request_sock *req,
 				       const struct sk_buff *skb);
 
 int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
 
-struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
+struct sock *dccp_v4_request_recv_sock(const struct sock *sk, struct sk_buff *skb,
 				       struct request_sock *req,
 				       struct dst_entry *dst);
 struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
@@ -390,7 +390,8 @@ static inline u64 dccp_v4_init_sequence(const struct sk_buff *skb)
  *
  * This is the equivalent of TCP's tcp_v4_syn_recv_sock
  */
-struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
+struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
+				       struct sk_buff *skb,
 				       struct request_sock *req,
 				       struct dst_entry *dst)
 {
@@ -527,7 +528,7 @@ out:
 	return err;
 }
 
-static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
+static void dccp_v4_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
 {
 	int err;
 	const struct iphdr *rxiph;
@@ -234,7 +234,7 @@ static void dccp_v6_reqsk_destructor(struct request_sock *req)
 	kfree_skb(inet_rsk(req)->pktopts);
 }
 
-static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
+static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
 {
 	const struct ipv6hdr *rxip6h;
 	struct sk_buff *skb;
@@ -408,13 +408,14 @@ drop:
 	return -1;
 }
 
-static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
+static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
 					      struct sk_buff *skb,
 					      struct request_sock *req,
 					      struct dst_entry *dst)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
-	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
+	struct ipv6_pinfo *newnp;
+	const struct ipv6_pinfo *np = inet6_sk(sk);
 	struct inet_sock *newinet;
 	struct dccp6_sock *newdp6;
 	struct sock *newsk;
@@ -462,22 +463,11 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 	if (sk_acceptq_is_full(sk))
 		goto out_overflow;
 
-	if (dst == NULL) {
-		struct in6_addr *final_p, final;
+	if (!dst) {
 		struct flowi6 fl6;
 
-		memset(&fl6, 0, sizeof(fl6));
-		fl6.flowi6_proto = IPPROTO_DCCP;
-		fl6.daddr = ireq->ir_v6_rmt_addr;
-		final_p = fl6_update_dst(&fl6, np->opt, &final);
-		fl6.saddr = ireq->ir_v6_loc_addr;
-		fl6.flowi6_oif = sk->sk_bound_dev_if;
-		fl6.fl6_dport = ireq->ir_rmt_port;
-		fl6.fl6_sport = htons(ireq->ir_num);
-		security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
-
-		dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
-		if (IS_ERR(dst))
+		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_DCCP);
+		if (!dst)
 			goto out;
 	}
 
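The hunk above deletes DCCP's hand-rolled flowi6 setup in favour of inet6_csk_route_req(), which elsewhere in the series gains a u8 proto parameter so TCP and DCCP can share one routing helper. A toy sketch of that deduplication-by-parameter (illustrative types; only the IPPROTO numbers are real):

struct flow_sketch {
	int proto;
	unsigned int daddr;
};

/* One parameterized helper replaces two near-identical setup blocks. */
static struct flow_sketch route_req(int proto, unsigned int daddr)
{
	struct flow_sketch f = { proto, daddr };

	/* ...the shared route lookup would run here... */
	return f;
}

static struct flow_sketch dccp_route(unsigned int daddr)
{
	return route_req(33, daddr);	/* IPPROTO_DCCP */
}

static struct flow_sketch tcp_route(unsigned int daddr)
{
	return route_req(6, daddr);	/* IPPROTO_TCP */
}

int main(void)
{
	return (tcp_route(1).proto - 6) + (dccp_route(1).proto - 33);
}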
@@ -72,7 +72,7 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
 	dccp_done(sk);
 }
 
-struct sock *dccp_create_openreq_child(struct sock *sk,
+struct sock *dccp_create_openreq_child(const struct sock *sk,
 				       const struct request_sock *req,
 				       const struct sk_buff *skb)
 {
@@ -236,7 +236,7 @@ int dccp_child_process(struct sock *parent, struct sock *child,
 
 EXPORT_SYMBOL_GPL(dccp_child_process);
 
-void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+void dccp_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 			 struct request_sock *rsk)
 {
 	DCCP_BUG("DCCP-ACK packets are never sent in LISTEN/RESPOND state");
@@ -219,17 +219,13 @@ int inet_listen(struct socket *sock, int backlog)
 		 * shutdown() (rather than close()).
 		 */
 		if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 &&
-		    !inet_csk(sk)->icsk_accept_queue.fastopenq) {
+		    !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) {
 			if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0)
-				err = fastopen_init_queue(sk, backlog);
+				fastopen_queue_tune(sk, backlog);
 			else if ((sysctl_tcp_fastopen &
 				  TFO_SERVER_WO_SOCKOPT2) != 0)
-				err = fastopen_init_queue(sk,
+				fastopen_queue_tune(sk,
 				    ((uint)sysctl_tcp_fastopen) >> 16);
-			else
-				err = 0;
-			if (err)
-				goto out;
 
 			tcp_fastopen_init_key_once(true);
 		}
@@ -335,9 +335,8 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
 
 		sk_acceptq_removed(sk);
 		if (sk->sk_protocol == IPPROTO_TCP &&
-		    tcp_rsk(req)->tfo_listener &&
-		    queue->fastopenq) {
-			spin_lock_bh(&queue->fastopenq->lock);
+		    tcp_rsk(req)->tfo_listener) {
+			spin_lock_bh(&queue->fastopenq.lock);
 			if (tcp_rsk(req)->tfo_listener) {
 				/* We are still waiting for the final ACK from 3WHS
 				 * so can't free req now. Instead, we set req->sk to
@@ -348,7 +347,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
 				req->sk = NULL;
 				req = NULL;
 			}
-			spin_unlock_bh(&queue->fastopenq->lock);
+			spin_unlock_bh(&queue->fastopenq.lock);
 		}
 out:
 	release_sock(sk);
@@ -439,7 +438,7 @@ no_route:
 }
 EXPORT_SYMBOL_GPL(inet_csk_route_req);
 
-struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
+struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
 					    struct sock *newsk,
 					    const struct request_sock *req)
 {
@@ -886,12 +885,12 @@ void inet_csk_listen_stop(struct sock *sk)
 		sk_acceptq_removed(sk);
 		reqsk_put(req);
 	}
-	if (queue->fastopenq) {
+	if (queue->fastopenq.rskq_rst_head) {
 		/* Free all the reqs queued in rskq_rst_head. */
-		spin_lock_bh(&queue->fastopenq->lock);
-		acc_req = queue->fastopenq->rskq_rst_head;
-		queue->fastopenq->rskq_rst_head = NULL;
-		spin_unlock_bh(&queue->fastopenq->lock);
+		spin_lock_bh(&queue->fastopenq.lock);
+		acc_req = queue->fastopenq.rskq_rst_head;
+		queue->fastopenq.rskq_rst_head = NULL;
+		spin_unlock_bh(&queue->fastopenq.lock);
 		while ((req = acc_req) != NULL) {
 			acc_req = req->dl_next;
 			reqsk_put(req);
@@ -126,7 +126,7 @@ void inet_put_port(struct sock *sk)
 }
 EXPORT_SYMBOL(inet_put_port);
 
-int __inet_inherit_port(struct sock *sk, struct sock *child)
+int __inet_inherit_port(const struct sock *sk, struct sock *child)
 {
 	struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
 	unsigned short port = inet_sk(child)->inet_num;
@@ -192,15 +192,11 @@ u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
 }
 EXPORT_SYMBOL_GPL(__cookie_v4_init_sequence);
 
-__u32 cookie_v4_init_sequence(struct sock *sk, const struct sk_buff *skb,
-			      __u16 *mssp)
+__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mssp)
 {
 	const struct iphdr *iph = ip_hdr(skb);
 	const struct tcphdr *th = tcp_hdr(skb);
 
-	tcp_synq_overflow(sk);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
-
 	return __cookie_v4_init_sequence(iph, th, mssp);
 }
 
@@ -2253,13 +2253,6 @@ int tcp_disconnect(struct sock *sk, int flags)
 }
 EXPORT_SYMBOL(tcp_disconnect);
 
-void tcp_sock_destruct(struct sock *sk)
-{
-	inet_sock_destruct(sk);
-
-	kfree(inet_csk(sk)->icsk_accept_queue.fastopenq);
-}
-
 static inline bool tcp_can_repair_sock(const struct sock *sk)
 {
 	return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
@@ -2581,7 +2574,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 			      TCPF_LISTEN))) {
 			tcp_fastopen_init_key_once(true);
 
-			err = fastopen_init_queue(sk, val);
+			fastopen_queue_tune(sk, val);
 		} else {
 			err = -EINVAL;
 		}
@@ -2849,10 +2842,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
 		break;
 
 	case TCP_FASTOPEN:
-		if (icsk->icsk_accept_queue.fastopenq)
-			val = icsk->icsk_accept_queue.fastopenq->max_qlen;
-		else
-			val = 0;
+		val = icsk->icsk_accept_queue.fastopenq.max_qlen;
 		break;
 
 	case TCP_TIMESTAMP:
@@ -142,9 +142,9 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
 	if (!child)
 		return NULL;
 
-	spin_lock(&queue->fastopenq->lock);
-	queue->fastopenq->qlen++;
-	spin_unlock(&queue->fastopenq->lock);
+	spin_lock(&queue->fastopenq.lock);
+	queue->fastopenq.qlen++;
+	spin_unlock(&queue->fastopenq.lock);
 
 	/* Initialize the child socket. Have to fix some values to take
 	 * into account the child is a Fast Open socket and is created
@@ -237,8 +237,8 @@ static bool tcp_fastopen_queue_check(struct sock *sk)
 	 * between qlen overflow causing Fast Open to be disabled
 	 * temporarily vs a server not supporting Fast Open at all.
 	 */
-	fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
-	if (!fastopenq || fastopenq->max_qlen == 0)
+	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
+	if (fastopenq->max_qlen == 0)
 		return false;
 
 	if (fastopenq->qlen >= fastopenq->max_qlen) {
@@ -5472,7 +5472,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
 }
 
 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
-					 const struct tcphdr *th, unsigned int len)
+					 const struct tcphdr *th)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -5698,11 +5698,11 @@ reset_and_undo:
  * address independent.
  */
 
-int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
-			  const struct tcphdr *th, unsigned int len)
+int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
+	const struct tcphdr *th = tcp_hdr(skb);
 	struct request_sock *req;
 	int queued = 0;
 	bool acceptable;
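tcp_rcv_state_process() also loses its th and len parameters here: both are derivable from the skb, so passing them separately only invited the three values to disagree. A toy version of the slimmed-down signature (illustrative types, not kernel code):

#include <stddef.h>

struct hdr_sketch {
	unsigned short dport;
};

struct buf_sketch {
	unsigned char *data;	/* points at the transport header */
	size_t len;
};

/* Before: f(buf, hdr, len) passed three values that had to agree.
 * After: the callee derives header and length from the buffer itself.
 */
static int rcv_state_process(const struct buf_sketch *b)
{
	const struct hdr_sketch *th = (const struct hdr_sketch *)b->data;

	return b->len >= sizeof(*th) && th->dport != 0;
}

int main(void)
{
	struct hdr_sketch h = { 80 };
	struct buf_sketch b = { (unsigned char *)&h, sizeof(h) };

	return !rcv_state_process(&b);
}

The callers in tcp_v4_do_rcv(), tcp_v6_do_rcv() and tcp_child_process() shrink accordingly in later hunks.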
@@ -5749,7 +5749,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		goto discard;
 
 	case TCP_SYN_SENT:
-		queued = tcp_rcv_synsent_state_process(sk, skb, th, len);
+		queued = tcp_rcv_synsent_state_process(sk, skb, th);
 		if (queued >= 0)
 			return queued;
 
@@ -6064,7 +6064,7 @@ EXPORT_SYMBOL(inet_reqsk_alloc);
 /*
  * Return true if a syncookie should be sent
  */
-static bool tcp_syn_flood_action(struct sock *sk,
+static bool tcp_syn_flood_action(const struct sock *sk,
 				 const struct sk_buff *skb,
 				 const char *proto)
 {
@@ -6082,11 +6082,12 @@ static bool tcp_syn_flood_action(struct sock *sk,
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
 
 	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
-	if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
-		lopt->synflood_warned = 1;
+	if (!lopt->synflood_warned &&
+	    sysctl_tcp_syncookies != 2 &&
+	    xchg(&lopt->synflood_warned, 1) == 0)
 		pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
 			proto, ntohs(tcp_hdr(skb)->dest), msg);
-	}
 
 	return want_cookie;
 }
@@ -576,7 +576,7 @@ EXPORT_SYMBOL(tcp_v4_send_check);
  *	Exception: precedence violation. We do not implement it in any case.
  */
 
-static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
+static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
 {
 	const struct tcphdr *th = tcp_hdr(skb);
 	struct {
@@ -795,7 +795,7 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 	inet_twsk_put(tw);
 }
 
-static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 				  struct request_sock *req)
 {
 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
@@ -1180,7 +1180,8 @@ static void tcp_v4_init_req(struct request_sock *req,
 	ireq->opt = tcp_v4_save_options(skb);
 }
 
-static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
+static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
+					  struct flowi *fl,
 					  const struct request_sock *req,
 					  bool *strict)
 {
@@ -1242,7 +1243,7 @@ EXPORT_SYMBOL(tcp_v4_conn_request);
  * The three way handshake has completed - we got a valid synack -
  * now create the new socket.
  */
-struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 				  struct request_sock *req,
 				  struct dst_entry *dst)
 {
@@ -1420,7 +1421,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 	} else
 		sock_rps_save_rxhash(sk, skb);
 
-	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
+	if (tcp_rcv_state_process(sk, skb)) {
 		rsk = sk;
 		goto reset;
 	}
@@ -2185,7 +2186,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
 	const struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	const struct inet_sock *inet = inet_sk(sk);
-	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
+	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
 	__be32 dest = inet->inet_daddr;
 	__be32 src = inet->inet_rcv_saddr;
 	__u16 destp = ntohs(inet->inet_dport);
@@ -441,7 +441,9 @@ EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
  * Actually, we could lots of memory writes here. tp of listening
  * socket contains all necessary default parameters.
  */
-struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
+struct sock *tcp_create_openreq_child(const struct sock *sk,
+				      struct request_sock *req,
+				      struct sk_buff *skb)
 {
 	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
 
@@ -821,8 +823,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
 	int state = child->sk_state;
 
 	if (!sock_owned_by_user(child)) {
-		ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
-					    skb->len);
+		ret = tcp_rcv_state_process(child, skb);
 		/* Wakeup parent, send SIGIO */
 		if (state == TCP_SYN_RECV && child->sk_state != state)
 			parent->sk_data_ready(parent);
@@ -67,15 +67,16 @@ EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);
 
 struct dst_entry *inet6_csk_route_req(const struct sock *sk,
 				      struct flowi6 *fl6,
-				      const struct request_sock *req)
+				      const struct request_sock *req,
+				      u8 proto)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
-	struct ipv6_pinfo *np = inet6_sk(sk);
+	const struct ipv6_pinfo *np = inet6_sk(sk);
 	struct in6_addr *final_p, final;
 	struct dst_entry *dst;
 
 	memset(fl6, 0, sizeof(*fl6));
-	fl6->flowi6_proto = IPPROTO_TCP;
+	fl6->flowi6_proto = proto;
 	fl6->daddr = ireq->ir_v6_rmt_addr;
 	final_p = fl6_update_dst(fl6, np->opt, &final);
 	fl6->saddr = ireq->ir_v6_loc_addr;
@@ -91,6 +92,7 @@ struct dst_entry *inet6_csk_route_req(const struct sock *sk,
 
 	return dst;
 }
+EXPORT_SYMBOL(inet6_csk_route_req);
 
 /*
  * request_sock (formerly open request) hash tables.
@@ -114,14 +114,11 @@ u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
 }
 EXPORT_SYMBOL_GPL(__cookie_v6_init_sequence);
 
-__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb, __u16 *mssp)
+__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mssp)
 {
 	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	const struct tcphdr *th = tcp_hdr(skb);
 
-	tcp_synq_overflow(sk);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
-
 	return __cookie_v6_init_sequence(iph, th, mssp);
 }
 
@@ -70,8 +70,8 @@
 #include <linux/crypto.h>
 #include <linux/scatterlist.h>
 
-static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
-static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
+static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 				  struct request_sock *req);
 
 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
@@ -447,7 +447,8 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 	int err = -ENOMEM;
 
 	/* First, grab a route. */
-	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
+	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
+					       IPPROTO_TCP)) == NULL)
 		goto done;
 
 	skb = tcp_make_synack(sk, dst, req, foc);
@@ -688,13 +689,14 @@ static void tcp_v6_init_req(struct request_sock *req,
 	}
 }
 
-static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
+static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
+					  struct flowi *fl,
 					  const struct request_sock *req,
 					  bool *strict)
 {
 	if (strict)
 		*strict = true;
-	return inet6_csk_route_req(sk, &fl->u.ip6, req);
+	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
 }
 
 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
@@ -724,7 +726,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
 };
 
-static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
+static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
 				 u32 ack, u32 win, u32 tsval, u32 tsecr,
 				 int oif, struct tcp_md5sig_key *key, int rst,
 				 u8 tclass, u32 label)
@@ -823,7 +825,7 @@ static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
 	kfree_skb(buff);
 }
 
-static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
+static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
 {
 	const struct tcphdr *th = tcp_hdr(skb);
 	u32 seq = 0, ack_seq = 0;
@@ -894,7 +896,7 @@ release_sk1:
 #endif
 }
 
-static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
+static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
 			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
 			    struct tcp_md5sig_key *key, u8 tclass,
 			    u32 label)
@@ -917,7 +919,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 	inet_twsk_put(tw);
 }
 
-static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 				  struct request_sock *req)
 {
 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
@@ -985,12 +987,13 @@ drop:
 	return 0; /* don't send reset */
 }
 
-static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 					 struct request_sock *req,
 					 struct dst_entry *dst)
 {
 	struct inet_request_sock *ireq;
-	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
+	struct ipv6_pinfo *newnp;
+	const struct ipv6_pinfo *np = inet6_sk(sk);
 	struct tcp6_sock *newtcp6sk;
 	struct inet_sock *newinet;
 	struct tcp_sock *newtp;
@@ -1058,7 +1061,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 		goto out_overflow;
 
 	if (!dst) {
-		dst = inet6_csk_route_req(sk, &fl6, req);
+		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
 		if (!dst)
 			goto out;
 	}
@@ -1272,7 +1275,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	} else
 		sock_rps_save_rxhash(sk, skb);
 
-	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
+	if (tcp_rcv_state_process(sk, skb))
 		goto reset;
 	if (opt_skb)
 		goto ipv6_pktoptions;
@@ -1669,7 +1672,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 	const struct inet_sock *inet = inet_sk(sp);
 	const struct tcp_sock *tp = tcp_sk(sp);
 	const struct inet_connection_sock *icsk = inet_csk(sp);
-	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
+	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
 
 	dest  = &sp->sk_v6_daddr;
 	src   = &sp->sk_v6_rcv_saddr;
|
||||
(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
|
||||
tp->snd_cwnd,
|
||||
sp->sk_state == TCP_LISTEN ?
|
||||
(fastopenq ? fastopenq->max_qlen : 0) :
|
||||
fastopenq->max_qlen :
|
||||
(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
|
||||
);
|
||||
}
|
||||
|