/*
 * IPv6 specific functions of netfilter core
 *
 * Rusty Russell (C) 2000 -- This code is GPL.
 * Patrick McHardy (C) 2006-2012
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ipv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
#include <net/addrconf.h>
#include <net/dst.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/xfrm.h>
#include <net/netfilter/nf_queue.h>
#include <net/netfilter/nf_conntrack_bridge.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include "../bridge/br_private.h"

/* Rerouting must look at the full socket (the caller's state->sk), not
 * skb->sk: for tunnelled packets the two can differ ("skb->sk can be
 * different from sk, in case of tunnels", as the comment above
 * __ip_queue_xmit() notes), and only the transmitting socket's
 * sk_bound_dev_if is meaningful for routing the encapsulated packet.
 */
int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct sock *sk = sk_to_full_sk(sk_partial);
	struct net_device *dev = skb_dst(skb)->dev;
	struct flow_keys flkeys;
	unsigned int hh_len;
	struct dst_entry *dst;
	int strict = (ipv6_addr_type(&iph->daddr) &
		      (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
	struct flowi6 fl6 = {
		.flowi6_l3mdev = l3mdev_master_ifindex(dev),
		.flowi6_mark = skb->mark,
		.flowi6_uid = sock_net_uid(net, sk),
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
	};
	int err;

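	/* Prefer the socket's bound device; for multicast/link-local
	 * destinations (strict), keep the original output device.
	 */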
	if (sk && sk->sk_bound_dev_if)
		fl6.flowi6_oif = sk->sk_bound_dev_if;
	else if (strict)
		fl6.flowi6_oif = dev->ifindex;

	fib6_rules_early_flow_dissect(net, skb, &fl6, &flkeys);
	dst = ip6_route_output(net, sk, &fl6);
	err = dst->error;
	if (err) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
		net_dbg_ratelimited("ip6_route_me_harder: No more route\n");
		dst_release(dst);
		return err;
	}

	/* Drop old route. */
	skb_dst_drop(skb);

	skb_dst_set(skb, dst);

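	/* If the packet has not already been transformed, check whether
	 * an IPsec policy applies on the rerouted path and swap in the
	 * transformed dst if so.
	 */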
#ifdef CONFIG_XFRM
	if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
	    xfrm_decode_session(net, skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) {
		skb_dst_set(skb, NULL);
		dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
		if (IS_ERR(dst))
			return PTR_ERR(dst);
		skb_dst_set(skb, dst);
	}
#endif

	/* Change in oif may mean change in hh_len. */
	hh_len = skb_dst(skb)->dev->hard_header_len;
	if (skb_headroom(skb) < hh_len &&
	    pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
			     0, GFP_ATOMIC))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(ip6_route_me_harder);
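
/*
 * Hypothetical sketch (not part of the original file): how a LOCAL_OUT
 * hook that rewrites skb->mark -- the mangle-table situation described
 * above -- would reroute so the new mark is honoured. The function name
 * and mark value are illustrative assumptions.
 */
static unsigned int __maybe_unused example_remark_hook(void *priv,
						       struct sk_buff *skb,
						       const struct nf_hook_state *state)
{
	skb->mark = 0x42;	/* illustrative fwmark */

	/* Mark-based policy routing may now select a different route;
	 * note the full-socket lookup uses state->sk, not skb->sk.
	 */
	if (ip6_route_me_harder(state->net, state->sk, skb) < 0)
		return NF_DROP;
	return NF_ACCEPT;
}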
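/* Reinjection path for packets that were queued to userspace: if an
 * address or the mark changed while the packet was queued, redo the
 * route lookup.
 */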
static int nf_ip6_reroute(struct sk_buff *skb,
			  const struct nf_queue_entry *entry)
{
	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->state.hook == NF_INET_LOCAL_OUT) {
		const struct ipv6hdr *iph = ipv6_hdr(skb);

		if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
		    !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
		    skb->mark != rt_info->mark)
			return ip6_route_me_harder(entry->state.net, entry->state.sk, skb);
	}
	return 0;
}
int __nf_ip6_route(struct net *net, struct dst_entry **dst,
		   struct flowi *fl, bool strict)
{
	static const struct ipv6_pinfo fake_pinfo;
	static const struct inet_sock fake_sk = {
		/* makes ip6_route_output set RT6_LOOKUP_F_IFACE: */
		.sk.sk_bound_dev_if = 1,
		.pinet6 = (struct ipv6_pinfo *) &fake_pinfo,
	};
	const void *sk = strict ? &fake_sk : NULL;
	struct dst_entry *result;
	int err;

	result = ip6_route_output(net, sk, &fl->u.ip6);
	err = result->error;
	if (err)
		dst_release(result);
	else
		*dst = result;
	return err;
}
EXPORT_SYMBOL_GPL(__nf_ip6_route);
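
/*
 * Hypothetical sketch (not part of the original file): a strict lookup
 * through the helper above, as a netfilter module might perform it. The
 * function name is an illustrative assumption.
 */
static int __maybe_unused example_strict_lookup(struct net *net,
						const struct in6_addr *daddr,
						struct dst_entry **dst)
{
	struct flowi fl = {};

	fl.u.ip6.daddr = *daddr;
	/* strict == true routes via the fake bound socket above, which
	 * makes ip6_route_output() enforce RT6_LOOKUP_F_IFACE.
	 */
	return __nf_ip6_route(net, dst, &fl, true);
}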
int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		    struct nf_bridge_frag_data *data,
		    int (*output)(struct net *, struct sock *sk,
				  const struct nf_bridge_frag_data *data,
				  struct sk_buff *))
{
	int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
	u8 tstamp_type = skb->tstamp_type;
	ktime_t tstamp = skb->tstamp;
	struct ip6_frag_state state;
	u8 *prevhdr, nexthdr = 0;
	unsigned int mtu, hlen;
	int hroom, err = 0;
	__be32 frag_id;

	err = ip6_find_1stfragopt(skb, &prevhdr);
	if (err < 0)
		goto blackhole;
	hlen = err;
	nexthdr = *prevhdr;

	mtu = skb->dev->mtu;
	if (frag_max_size > mtu ||
	    frag_max_size < IPV6_MIN_MTU)
		goto blackhole;

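	/* Fragment payloads are carved in 8-byte units, so the usable
	 * MTU must cover the headers plus at least one such unit.
	 */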
	mtu = frag_max_size;
	if (mtu < hlen + sizeof(struct frag_hdr) + 8)
		goto blackhole;
	mtu -= hlen + sizeof(struct frag_hdr);

	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
				    &ipv6_hdr(skb)->saddr);

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto blackhole;

	hroom = LL_RESERVED_SPACE(skb->dev);
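	/* Fast path: when the skb already carries a frag list whose
	 * elements fit the MTU, each element becomes a fragment without
	 * copying; otherwise fall back to the copying slow path.
	 */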
	if (skb_has_frag_list(skb)) {
		unsigned int first_len = skb_pagelen(skb);
		struct ip6_fraglist_iter iter;
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
			goto blackhole;

		if (skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag2) {
			if (frag2->len > mtu ||
			    skb_headroom(frag2) < (hlen + hroom + sizeof(struct frag_hdr)))
				goto blackhole;

			/* Partially cloned skb? */
			if (skb_shared(frag2))
				goto slow_path;
		}

		err = ip6_fraglist_init(skb, hlen, prevhdr, nexthdr, frag_id,
					&iter);
		if (err < 0)
			goto blackhole;

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down.
			 */
			if (iter.frag)
				ip6_fraglist_prepare(skb, &iter);

			skb_set_delivery_time(skb, tstamp, tstamp_type);
			err = output(net, sk, data, skb);
			if (err || !iter.frag)
				break;

			skb = ip6_fraglist_next(&iter);
		}

		kfree(iter.tmp_hdr);
		if (!err)
			return 0;

		kfree_skb_list(iter.frag);
		return err;
	}
slow_path:
	/* This is a linearized skbuff, the original geometry is lost for us.
	 * This may also be a clone skbuff, we could preserve the geometry for
	 * the copies but probably not worth the effort.
	 */
	ip6_frag_init(skb, hlen, mtu, skb->dev->needed_tailroom,
		      LL_RESERVED_SPACE(skb->dev), prevhdr, nexthdr, frag_id,
		      &state);

	while (state.left > 0) {
		struct sk_buff *skb2;

		skb2 = ip6_frag_next(skb, &state);
		if (IS_ERR(skb2)) {
			err = PTR_ERR(skb2);
			goto blackhole;
		}

		skb_set_delivery_time(skb2, tstamp, tstamp_type);
		err = output(net, sk, data, skb2);
		if (err)
			goto blackhole;
	}
	consume_skb(skb);
	return err;

blackhole:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(br_ip6_fragment);
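
/*
 * Hypothetical sketch (not part of the original file): the shape of the
 * output callback br_ip6_fragment() expects. The real bridge-conntrack
 * caller restores the link-layer header it stashed in *data before
 * transmitting; this illustrative stand-in just queues the fragment.
 */
static int __maybe_unused example_frag_output(struct net *net, struct sock *sk,
					      const struct nf_bridge_frag_data *data,
					      struct sk_buff *skb)
{
	return dev_queue_xmit(skb);	/* illustrative only */
}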
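/* Ops table through which the netfilter core reaches these IPv6 helpers;
 * the IS_MODULE(CONFIG_IPV6) guards publish entries here only when IPv6
 * is modular, since built-in callers can link the symbols directly.
 */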
static const struct nf_ipv6_ops ipv6ops = {
#if IS_MODULE(CONFIG_IPV6)
	.chk_addr = ipv6_chk_addr,
	.route_me_harder = ip6_route_me_harder,
	.dev_get_saddr = ipv6_dev_get_saddr,
	.route = __nf_ip6_route,
#if IS_ENABLED(CONFIG_SYN_COOKIES)
	.cookie_init_sequence = __cookie_v6_init_sequence,
	.cookie_v6_check = __cookie_v6_check,
#endif
#endif
	.route_input = ip6_route_input,
	.fragment = ip6_fragment,
	.reroute = nf_ip6_reroute,
#if IS_MODULE(CONFIG_IPV6)
	.br_fragment = br_ip6_fragment,
#endif
};
int __init ipv6_netfilter_init(void)
{
	RCU_INIT_POINTER(nf_ipv6_ops, &ipv6ops);
	return 0;
}
/* This can be called from inet6_init() on errors, so it cannot
 * be marked __exit. -DaveM
 */
void ipv6_netfilter_fini(void)
{
	RCU_INIT_POINTER(nf_ipv6_ops, NULL);
}