// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	DCCP over IPv6
 *	Linux INET6 implementation
 *
 *	Based on net/dccp6/ipv6.c
 *
 *	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/xfrm.h>
#include <linux/string.h>

#include <net/addrconf.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/inet_sock.h>
#include <net/inet6_connection_sock.h>
#include <net/inet6_hashtables.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <net/rstreason.h>

#include "dccp.h"
#include "ipv6.h"
#include "feat.h"

struct dccp_v6_pernet {
	struct sock *v6_ctl_sk;
};

static unsigned int dccp_v6_pernet_id __read_mostly;

/* The per-net v6_ctl_sk is used for sending RSTs and ACKs */

static const struct inet_connection_sock_af_ops dccp_ipv6_mapped;
static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops;

/* add pseudo-header to DCCP checksum stored in skb->csum */
static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
					  const struct in6_addr *saddr,
					  const struct in6_addr *daddr)
{
	return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
}

static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_hdr *dh = dccp_hdr(skb);

	dccp_csum_outgoing(skb);
	dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &sk->sk_v6_daddr);
}

static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					     ipv6_hdr(skb)->saddr.s6_addr32,
					     dccp_hdr(skb)->dccph_dport,
					     dccp_hdr(skb)->dccph_sport);
}

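/* Handle an ICMPv6 error reported against a DCCP/IPv6 packet. */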
static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr;
	const struct dccp_hdr *dh;
	struct dccp_sock *dp;
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	__u64 seq;
	struct net *net = dev_net(skb->dev);

	if (!pskb_may_pull(skb, offset + sizeof(*dh)))
		return -EINVAL;
	dh = (struct dccp_hdr *)(skb->data + offset);
	if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
		return -EINVAL;
	hdr = (const struct ipv6hdr *)skb->data;
	dh = (struct dccp_hdr *)(skb->data + offset);

	sk = __inet6_lookup_established(net, &dccp_hashinfo,
					&hdr->daddr, dh->dccph_dport,
					&hdr->saddr, ntohs(dh->dccph_sport),
					inet6_iif(skb), 0);

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return -ENOENT;
	}

	if (sk->sk_state == DCCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return 0;
	}
	seq = dccp_hdr_seq(dh);
	if (sk->sk_state == DCCP_NEW_SYN_RECV) {
		dccp_req_err(sk, seq);
		return 0;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == DCCP_CLOSED)
		goto out;

	dp = dccp_sk(sk);
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
	    !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
			goto out;

		dst = inet6_csk_update_pmtu(sk, ntohl(info));
		if (!dst)
			goto out;

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst))
			dccp_sync_mss(sk, dst_mtu(dst));
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case DCCP_REQUESTING:
	case DCCP_RESPOND:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;
			/*
			 * Wake people up to see the error
			 * (see connect in sock.c)
			 */
			sk_error_report(sk);
			dccp_done(sk);
		} else {
			WRITE_ONCE(sk->sk_err_soft, err);
		}
		goto out;
	}

	if (!sock_owned_by_user(sk) && inet6_test_bit(RECVERR6, sk)) {
		sk->sk_err = err;
		sk_error_report(sk);
	} else {
		WRITE_ONCE(sk->sk_err_soft, err);
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return 0;
}

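/* Route, build and transmit a Response packet for the connection request @req. */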
static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct in6_addr *final_p, final;
	struct flowi6 fl6;
	int err = -1;
	struct dst_entry *dst;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_DCCP;
	fl6.daddr = ireq->ir_v6_rmt_addr;
	fl6.saddr = ireq->ir_v6_loc_addr;
	fl6.flowlabel = 0;
	fl6.flowi6_oif = ireq->ir_iif;
	fl6.fl6_dport = ireq->ir_rmt_port;
	fl6.fl6_sport = htons(ireq->ir_num);
	security_req_classify_flow(req, flowi6_to_flowi_common(&fl6));

	rcu_read_lock();
	final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
	rcu_read_unlock();

	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}

	skb = dccp_make_response(sk, dst, req);
	if (skb != NULL) {
		struct dccp_hdr *dh = dccp_hdr(skb);
		struct ipv6_txoptions *opt;

		dh->dccph_checksum = dccp_v6_csum_finish(skb,
							 &ireq->ir_v6_loc_addr,
							 &ireq->ir_v6_rmt_addr);
		fl6.daddr = ireq->ir_v6_rmt_addr;
		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, &fl6, READ_ONCE(sk->sk_mark), opt,
			       np->tclass, READ_ONCE(sk->sk_priority));
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	dst_release(dst);
	return err;
}

static void dccp_v6_reqsk_destructor(struct request_sock *req)
{
	dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
	kfree(inet_rsk(req)->ipv6_opt);
	kfree_skb(inet_rsk(req)->pktopts);
}

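/* Send a Reset in reply to @rxskb, using the per-net control socket. */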
static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb,
				   enum sk_rst_reason reason)
{
	const struct ipv6hdr *rxip6h;
	struct sk_buff *skb;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(rxskb)->dev);
	struct dccp_v6_pernet *pn;
	struct sock *ctl_sk;
	struct dst_entry *dst;

	if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
		return;

	if (!ipv6_unicast_destination(rxskb))
		return;

	pn = net_generic(net, dccp_v6_pernet_id);
	ctl_sk = pn->v6_ctl_sk;
	skb = dccp_ctl_make_reset(ctl_sk, rxskb);
	if (skb == NULL)
		return;

	rxip6h = ipv6_hdr(rxskb);
	dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
							    &rxip6h->daddr);

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = rxip6h->saddr;
	fl6.saddr = rxip6h->daddr;

	fl6.flowi6_proto = IPPROTO_DCCP;
	fl6.flowi6_oif = inet6_iif(rxskb);
	fl6.fl6_dport = dccp_hdr(skb)->dccph_dport;
	fl6.fl6_sport = dccp_hdr(skb)->dccph_sport;
	security_skb_classify_flow(rxskb, flowi6_to_flowi_common(&fl6));

	/* sk = NULL, but it is safe for now. RST socket required. */
	dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(skb, dst);
		ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0, 0);
		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
		DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(skb);
}

static struct request_sock_ops dccp6_request_sock_ops = {
	.family		= AF_INET6,
	.obj_size	= sizeof(struct dccp6_request_sock),
	.rtx_syn_ack	= dccp_v6_send_response,
	.send_ack	= dccp_reqsk_send_ack,
	.destructor	= dccp_v6_reqsk_destructor,
	.send_reset	= dccp_v6_ctl_send_reset,
	.syn_ack_timeout = dccp_syn_ack_timeout,
};

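/* Process a connection Request arriving on a listening socket. */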
static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req;
	struct dccp_request_sock *dreq;
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		return 0;	/* discard, don't send a reset here */

	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
		return 0;
	}

	if (dccp_bad_service_code(sk, service)) {
		dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
		goto drop;
	}
	/*
	 * There are no SYN attacks on IPv6, yet...
	 */
	dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
	if (inet_csk_reqsk_queue_is_full(sk))
		goto drop;

	if (sk_acceptq_is_full(sk))
		goto drop;

	req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk, true);
	if (req == NULL)
		goto drop;

	if (dccp_reqsk_init(req, dccp_sk(sk), skb))
		goto drop_and_free;

	dreq = dccp_rsk(req);
	if (dccp_parse_options(sk, dreq, skb))
		goto drop_and_free;

	ireq = inet_rsk(req);
	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
	ireq->ireq_family = AF_INET6;
	ireq->ir_mark = inet_request_mark(sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		refcount_inc(&skb->users);
		ireq->pktopts = skb;
	}
	ireq->ir_iif = READ_ONCE(sk->sk_bound_dev_if);

	/* So that link locals have meaning */
	if (!ireq->ir_iif &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = inet6_iif(skb);

	/*
	 * Step 3: Process LISTEN state
	 *
	 *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
	 *
	 *   Setting S.SWL/S.SWH is deferred to dccp_create_openreq_child().
	 */
	dreq->dreq_isr	   = dcb->dccpd_seq;
	dreq->dreq_gsr	   = dreq->dreq_isr;
	dreq->dreq_iss	   = dccp_v6_init_sequence(skb);
	dreq->dreq_gss	   = dreq->dreq_iss;
	dreq->dreq_service = service;

	if (dccp_v6_send_response(sk, req))
		goto drop_and_free;

	if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT)))
		reqsk_free(req);
	else
		reqsk_put(req);

	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
	return -1;
}

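/* Create the child socket for an accepted connection request. */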
static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req,
					      struct dst_entry *dst,
					      struct request_sock *req_unhash,
					      bool *own_req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct inet_sock *newinet;
	struct dccp6_sock *newdp6;
	struct sock *newsk;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */
		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst,
						  req_unhash, own_req);
		if (newsk == NULL)
			return NULL;

		newdp6 = (struct dccp6_sock *)newsk;
		newinet = inet_sk(newsk);
		newinet->pinet6 = &newdp6->inet6;
		newnp = inet6_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
		newsk->sk_backlog_rcv = dccp_v4_do_rcv;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->mcast_oif   = inet_iif(skb);
		newnp->mcast_hops  = ip_hdr(skb)->ttl;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, dccp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		struct flowi6 fl6;

		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_DCCP);
		if (!dst)
			goto out;
	}

	newsk = dccp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, dccp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	ip6_dst_store(newsk, dst, NULL, NULL);
	newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
						      NETIF_F_TSO);
	newdp6 = (struct dccp6_sock *)newsk;
	newinet = inet_sk(newsk);
	newinet->pinet6 = &newdp6->inet6;
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr	= ireq->ir_v6_rmt_addr;
	newnp->saddr		= ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr	= ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if	= ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->ipv6_mc_list = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;
	newnp->pktoptions = NULL;
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/*
	 * Clone native IPv6 options from the listening socket (if any).
	 *
	 * Yes, keeping a reference count would be much more clever, but we
	 * do one more thing here: reattach optmem to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	dccp_sync_mss(newsk, dst_mtu(dst));

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		dccp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL);
	/* Clone pktoptions received with SYN, if we own the req */
	if (*own_req && ireq->pktopts) {
		newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk);
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and is backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, dccp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   dccp_v6_hnd_req and dccp_v6_ctl_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv is currently
	 * called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code where we
	   may make it not affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose a better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone_and_charge_r(skb, sk);

	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	/*
	 *  Step 3: Process LISTEN state
	 *     If S.state == LISTEN,
	 *	 If P.type == Request or P contains a valid Init Cookie option,
	 *	      (* Must scan the packet's options to check for Init
	 *		 Cookies.  Only Init Cookies are processed here,
	 *		 however; other options are processed in Step 8.  This
	 *		 scan need only be performed if the endpoint uses Init
	 *		 Cookies *)
	 *	      (* Generate a new socket and switch to that socket *)
	 *	      Set S := new socket for this port pair
	 *	      S.state = RESPOND
	 *	      Choose S.ISS (initial seqno) or set from Init Cookies
	 *	      Initialize S.GAR := S.ISS
	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
	 *	      Continue with S.state == RESPOND
	 *	      (* A Response packet will be generated in Step 11 *)
	 *	 Otherwise,
	 *	      Generate Reset(No Connection) unless P.type == Reset
	 *	      Drop packet and return
	 *
	 * NOTE: the check for the packet types is done in
	 *	 dccp_rcv_state_process
	 */

	if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	dccp_v6_ctl_send_reset(sk, skb, SK_RST_REASON_NOT_SPECIFIED);
discard:
	if (opt_skb != NULL)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;

/* Handle the IPV6_PKTOPTIONS skb the same way it is done
 * in net/ipv6/tcp_ipv6.c
 */
ipv6_pktoptions:
	if (!((1 << sk->sk_state) & (DCCPF_CLOSED | DCCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			WRITE_ONCE(np->mcast_oif, inet6_iif(opt_skb));
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			WRITE_ONCE(np->mcast_hops, ipv6_hdr(opt_skb)->hop_limit);
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (inet6_test_bit(REPFLOW, sk))
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb,
				      &DCCP_SKB_CB(opt_skb)->header.h6)) {
			memmove(IP6CB(opt_skb),
				&DCCP_SKB_CB(opt_skb)->header.h6,
				sizeof(struct inet6_skb_parm));
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

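/* Entry point for DCCP packets received over IPv6. */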
static int dccp_v6_rcv(struct sk_buff *skb)
{
	const struct dccp_hdr *dh;
	bool refcounted;
	struct sock *sk;
	int min_cov;

	/* Step 1: Check header basics */

	if (dccp_invalid_packet(skb))
		goto discard_it;

	/* Step 1: If header checksum is incorrect, drop packet and return. */
	if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
				&ipv6_hdr(skb)->daddr)) {
		DCCP_WARN("dropped packet with invalid checksum\n");
		goto discard_it;
	}

	dh = dccp_hdr(skb);

	DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(dh);
	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;

	if (dccp_packet_without_ack(skb))
		DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
	else
		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);

lookup:
	sk = __inet6_lookup_skb(&dccp_hashinfo, skb, __dccp_hdr_len(dh),
				dh->dccph_sport, dh->dccph_dport,
				inet6_iif(skb), 0, &refcounted);
	if (!sk) {
		dccp_pr_debug("failed to look up flow ID in table and "
			      "get corresponding socket\n");
		goto no_dccp_socket;
	}

	/*
	 *  Step 2:
	 *	... or S.state == TIMEWAIT,
	 *	   Generate Reset(No Connection) unless P.type == Reset
	 *	   Drop packet and return
	 */
	if (sk->sk_state == DCCP_TIME_WAIT) {
		dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
		inet_twsk_put(inet_twsk(sk));
		goto no_dccp_socket;
	}

	if (sk->sk_state == DCCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		if (unlikely(sk->sk_state != DCCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		refcounted = true;
		nsk = dccp_check_req(sk, skb, req);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
		} else if (dccp_child_process(sk, nsk, skb)) {
			dccp_v6_ctl_send_reset(sk, skb, SK_RST_REASON_NOT_SPECIFIED);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	/*
	 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
	 *	o if MinCsCov = 0, only packets with CsCov = 0 are accepted
	 *	o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
	 */
	min_cov = dccp_sk(sk)->dccps_pcrlen;
	if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
		dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
			      dh->dccph_cscov, min_cov);
		/* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset_ct(skb);

	return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4,
				refcounted) ? -1 : 0;

no_dccp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;
	/*
	 *  Step 2:
	 *	If no socket ...
	 *	   Generate Reset(No Connection) unless P.type == Reset
	 *	   Drop packet and return
	 */
	if (dh->dccph_type != DCCP_PKT_RESET) {
		DCCP_SKB_CB(skb)->dccpd_reset_code =
					DCCP_RESET_CODE_NO_CONNECTION;
		dccp_v6_ctl_send_reset(sk, skb, SK_RST_REASON_NOT_SPECIFIED);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	if (refcounted)
		sock_put(sk);
	goto discard_it;
}

static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			   int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	dp->dccps_role = DCCP_ROLE_CLIENT;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (inet6_test_bit(SNDFLOW, sk)) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}
	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If an interface was set at bind time, the
			 * indices must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface. */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 * DCCP over IPv4
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		net_dbg_ratelimited("connect: ipv4 mapped\n");

		if (ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &dccp_ipv6_mapped;
		sk->sk_backlog_rcv = dccp_v4_do_rcv;

		err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &dccp_ipv6_af_ops;
			sk->sk_backlog_rcv = dccp_v6_do_rcv;
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;
		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

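	/*
	 * Build the flow key describing this connection so the IPv6 routing
	 * code can pick a route and, if necessary, a source address.
	 */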
	fl6.flowi6_proto = IPPROTO_DCCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;

		err = inet_bhash2_update_saddr(sk, saddr, AF_INET6);
		if (err)
			goto failure;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;

	inet->inet_dport = usin->sin6_port;

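	/*
	 * Enter REQUESTING: pick a local port, hash the socket into the
	 * connection tables and derive the initial sequence number from the
	 * address/port four-tuple before sending the DCCP-Request.
	 */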
	dccp_set_state(sk, DCCP_REQUESTING);
	err = inet6_hash_connect(&dccp_death_row, sk);
	if (err)
		goto late_failure;

	dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
						      sk->sk_v6_daddr.s6_addr32,
						      inet->inet_sport,
						      inet->inet_dport);
	err = dccp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	dccp_set_state(sk, DCCP_CLOSED);
	inet_bhash2_reset_saddr(sk);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

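/*
 * Address-family specific hooks used by the connection-oriented socket core
 * for DCCP over native IPv6.
 */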
static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = dccp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = dccp_v6_conn_request,
	.syn_recv_sock	   = dccp_v6_request_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
};

/*
 *	DCCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = dccp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = dccp_v6_conn_request,
	.syn_recv_sock	   = dccp_v6_request_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
};

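/*
 * Release DCCP-specific state before the generic IPv6 socket destructor
 * runs.
 */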
static void dccp_v6_sk_destruct(struct sock *sk)
{
	dccp_destruct_common(sk);
	inet6_sock_destruct(sk);
}

/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
static int dccp_v6_init_sock(struct sock *sk)
{
	static __u8 dccp_v6_ctl_sock_initialized;
	int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);

	if (err == 0) {
		if (unlikely(!dccp_v6_ctl_sock_initialized))
			dccp_v6_ctl_sock_initialized = 1;
		inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
		sk->sk_destruct = dccp_v6_sk_destruct;
	}

	return err;
}

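/*
 * Object size for DCCPv6 sockets that enter TIME_WAIT; the generic time-wait
 * code allocates its mini-sockets from a slab of this size.
 */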
static struct timewait_sock_ops dccp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct dccp6_timewait_sock),
};

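/*
 * Transport protocol glue: most operations are shared with DCCPv4; only
 * connect, socket init, backlog processing and the hash routine are
 * IPv6 specific.
 */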
static struct proto dccp_v6_prot = {
	.name		   = "DCCPv6",
	.owner		   = THIS_MODULE,
	.close		   = dccp_close,
	.connect	   = dccp_v6_connect,
	.disconnect	   = dccp_disconnect,
	.ioctl		   = dccp_ioctl,
	.init		   = dccp_v6_init_sock,
	.setsockopt	   = dccp_setsockopt,
	.getsockopt	   = dccp_getsockopt,
	.sendmsg	   = dccp_sendmsg,
	.recvmsg	   = dccp_recvmsg,
	.backlog_rcv	   = dccp_v6_do_rcv,
	.hash		   = inet6_hash,
	.unhash		   = inet_unhash,
	.accept		   = inet_csk_accept,
	.get_port	   = inet_csk_get_port,
	.shutdown	   = dccp_shutdown,
	.destroy	   = dccp_destroy_sock,
	.orphan_count	   = &dccp_orphan_count,
	.max_header	   = MAX_DCCP_HEADER,
	.obj_size	   = sizeof(struct dccp6_sock),
	.ipv6_pinfo_offset = offsetof(struct dccp6_sock, inet6),
	.slab_flags	   = SLAB_TYPESAFE_BY_RCU,
	.rsk_prot	   = &dccp6_request_sock_ops,
	.twsk_prot	   = &dccp6_timewait_sock_ops,
	.h.hashinfo	   = &dccp_hashinfo,
};

static const struct inet6_protocol dccp_v6_protocol = {
	.handler	= dccp_v6_rcv,
	.err_handler	= dccp_v6_err,
	.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};

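/*
 * BSD socket-call entry points for PF_INET6/SOCK_DCCP sockets, mapping the
 * socket API onto the generic inet6 and DCCP helpers.
 */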
static const struct proto_ops inet6_dccp_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = inet6_bind,
	.connect	   = inet_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = inet_accept,
	.getname	   = inet6_getname,
	.poll		   = dccp_poll,
	.ioctl		   = inet6_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = inet_dccp_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = sock_common_recvmsg,
	.mmap		   = sock_no_mmap,
#ifdef CONFIG_COMPAT
	.compat_ioctl	   = inet6_compat_ioctl,
#endif
};

static struct inet_protosw dccp_v6_protosw = {
	.type		= SOCK_DCCP,
	.protocol	= IPPROTO_DCCP,
	.prot		= &dccp_v6_prot,
	.ops		= &inet6_dccp_ops,
	.flags		= INET_PROTOSW_ICSK,
};

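/*
 * Per-netns setup: create the IPv6 control socket used to send Resets on
 * behalf of connections without a socket, and refuse to start if the global
 * DCCP hash tables were never allocated.
 */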
static int __net_init dccp_v6_init_net(struct net *net)
{
	struct dccp_v6_pernet *pn = net_generic(net, dccp_v6_pernet_id);

	if (dccp_hashinfo.bhash == NULL)
		return -ESOCKTNOSUPPORT;

	return inet_ctl_sock_create(&pn->v6_ctl_sk, PF_INET6,
				    SOCK_DCCP, IPPROTO_DCCP, net);
}

static void __net_exit dccp_v6_exit_net(struct net *net)
{
	struct dccp_v6_pernet *pn = net_generic(net, dccp_v6_pernet_id);

	inet_ctl_sock_destroy(pn->v6_ctl_sk);
}

static struct pernet_operations dccp_v6_ops = {
	.init	= dccp_v6_init_net,
	.exit	= dccp_v6_exit_net,
	.id	= &dccp_v6_pernet_id,
	.size	= sizeof(struct dccp_v6_pernet),
};

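/*
 * Module init: register the proto, protosw and per-netns state first, and
 * only then hook dccp_v6_rcv into the IPv6 protocol table, so that packets
 * cannot arrive before everything else is in place.
 */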
static int __init dccp_v6_init(void)
{
	int err = proto_register(&dccp_v6_prot, 1);

	if (err)
		goto out;

	inet6_register_protosw(&dccp_v6_protosw);

	err = register_pernet_subsys(&dccp_v6_ops);
	if (err)
		goto out_destroy_ctl_sock;

	err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	if (err)
		goto out_unregister_proto;

out:
	return err;
out_unregister_proto:
	unregister_pernet_subsys(&dccp_v6_ops);
out_destroy_ctl_sock:
	inet6_unregister_protosw(&dccp_v6_protosw);
	proto_unregister(&dccp_v6_prot);
	goto out;
}

static void __exit dccp_v6_exit(void)
{
	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	unregister_pernet_subsys(&dccp_v6_ops);
	inet6_unregister_protosw(&dccp_v6_protosw);
	proto_unregister(&dccp_v6_prot);
}

module_init(dccp_v6_init);
module_exit(dccp_v6_exit);

/*
 * __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP
 * (33) values directly. Also cover the case where the protocol is not
 * specified, i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP.
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");