mirror of
https://github.com/torvalds/linux.git
synced 2024-11-26 22:21:42 +00:00
46d6c5ae95
If netfilter changes the packet mark when mangling, the packet is
rerouted using the route_me_harder set of functions. Prior to this
commit, there's one big difference between route_me_harder and the
ordinary initial routing functions, described in the comment above
__ip_queue_xmit():
/* Note: skb->sk can be different from sk, in case of tunnels */
int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
That function goes on to correctly make use of sk->sk_bound_dev_if,
rather than skb->sk->sk_bound_dev_if. And indeed the comment is true: a
tunnel will receive a packet in ndo_start_xmit with an initial skb->sk.
It will make some transformations to that packet, and then it will send
the encapsulated packet out of a *new* socket. That new socket will
basically always have a different sk_bound_dev_if (otherwise there'd be
a routing loop). So for the purposes of routing the encapsulated packet,
the routing information as it pertains to the socket should come from
that socket's sk, rather than the packet's original skb->sk. For that
reason __ip_queue_xmit() and related functions all do the right thing.
One might argue that all tunnels should just call skb_orphan(skb) before
transmitting the encapsulated packet into the new socket. But tunnels do
*not* do this -- and this is wisely avoided in skb_scrub_packet() too --
because features like TSQ rely on skb->destructor() being called when
that buffer space is truly available again. Calling skb_orphan(skb) too
early would result in buffers filling up unnecessarily and accounting
info being all wrong. Instead, additional routing must take into account
the new sk, just as __ip_queue_xmit() notes.
So, this commit addresses the problem by fishing the correct sk out of
state->sk -- it's already set properly in the call to nf_hook() in
__ip_local_out(), which receives the sk as part of its normal
functionality. So we make sure to plumb state->sk through the various
route_me_harder functions, and then make correct use of it following the
example of __ip_queue_xmit().
Fixes: 1da177e4c3
("Linux-2.6.12-rc2")
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Reviewed-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
218 lines
5.3 KiB
C
218 lines
5.3 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
#include <linux/kernel.h>
|
|
#include <linux/netfilter.h>
|
|
#include <linux/netfilter_ipv4.h>
|
|
#include <linux/netfilter_ipv6.h>
|
|
#include <net/netfilter/nf_queue.h>
|
|
#include <net/ip6_checksum.h>
|
|
|
|
#ifdef CONFIG_INET
/*
 * nf_ip_checksum - validate (or software-complete) an IPv4 transport checksum.
 * @skb:      packet whose transport checksum is checked
 * @hook:     netfilter hook the packet was seen on (NF_INET_*)
 * @dataoff:  offset of the transport header within the packet
 * @protocol: transport protocol number (e.g. IPPROTO_TCP, IPPROTO_UDP)
 *
 * Returns 0 when the checksum is valid (or verified in software); a
 * non-zero folded checksum value when verification fails.
 */
__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
		       unsigned int dataoff, u8 protocol)
{
	const struct iphdr *iph = ip_hdr(skb);
	__sum16 csum = 0;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		/* A hardware-provided full checksum only covers the packet
		 * as originally received, so it is only trustworthy before
		 * any header mangling, i.e. on PRE_ROUTING and LOCAL_IN.
		 */
		if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN)
			break;
		/* For non-TCP/UDP protocols a zero fold of skb->csum is
		 * enough; for TCP/UDP validate against the pseudo-header.
		 */
		if ((protocol != IPPROTO_TCP && protocol != IPPROTO_UDP &&
		     !csum_fold(skb->csum)) ||
		    !csum_tcpudp_magic(iph->saddr, iph->daddr,
				       skb->len - dataoff, protocol,
				       skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}
		/* Hardware checksum did not check out: fall back to a full
		 * software verification below.
		 */
		fallthrough;
	case CHECKSUM_NONE:
		/* Seed skb->csum with the pseudo-header sum (or 0 for other
		 * protocols) so __skb_checksum_complete() can verify the
		 * transport checksum over the payload.
		 */
		if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP)
			skb->csum = 0;
		else
			skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
						       skb->len - dataoff,
						       protocol, 0);
		csum = __skb_checksum_complete(skb);
	}
	return csum;
}
EXPORT_SYMBOL(nf_ip_checksum);
#endif
/*
 * nf_ip_checksum_partial - validate an IPv4 transport checksum over a
 * leading portion of the payload only.
 * @skb:      packet to check
 * @hook:     netfilter hook the packet was seen on
 * @dataoff:  offset of the transport header
 * @len:      number of payload bytes (after @dataoff) the check covers
 * @protocol: transport protocol number
 *
 * Returns 0 on success, non-zero folded checksum on failure.
 */
static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
				      unsigned int dataoff, unsigned int len,
				      u8 protocol)
{
	const struct iphdr *iph = ip_hdr(skb);
	__sum16 csum = 0;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		/* Covering the whole payload is exactly the full check. */
		if (len == skb->len - dataoff)
			return nf_ip_checksum(skb, hook, dataoff, protocol);
		fallthrough;
	case CHECKSUM_NONE:
		/* NOTE(review): arguments 3 and 4 look swapped relative to
		 * csum_tcpudp_nofold(saddr, daddr, len, proto, sum); this is
		 * presumably harmless only because proto and len are summed
		 * commutatively into the pseudo-header — verify against the
		 * arch implementations of csum_tcpudp_nofold().
		 */
		skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, protocol,
					       skb->len - dataoff, 0);
		skb->ip_summed = CHECKSUM_NONE;
		return __skb_checksum_complete_head(skb, dataoff + len);
	}
	return csum;
}
/*
 * nf_ip6_checksum - validate (or software-complete) an IPv6 transport
 * checksum.
 * @skb:      packet whose transport checksum is checked
 * @hook:     netfilter hook the packet was seen on (NF_INET_*)
 * @dataoff:  offset of the transport header within the packet
 * @protocol: transport protocol number
 *
 * Returns 0 when the checksum is valid; non-zero folded checksum otherwise.
 */
__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
			unsigned int dataoff, u8 protocol)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	__sum16 csum = 0;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		/* Hardware checksum is only trustworthy before any header
		 * mangling, i.e. on PRE_ROUTING and LOCAL_IN.
		 */
		if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN)
			break;
		/* skb->csum covers the packet from offset 0; subtract the
		 * checksum of the headers before the transport header so only
		 * the transport portion is validated against the
		 * pseudo-header.
		 */
		if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				     skb->len - dataoff, protocol,
				     csum_sub(skb->csum,
					      skb_checksum(skb, 0,
							   dataoff, 0)))) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}
		/* Hardware checksum did not check out: verify in software. */
		fallthrough;
	case CHECKSUM_NONE:
		/* Seed skb->csum with the pseudo-header sum, pre-compensated
		 * (via csum_sub(0, ...)) for the pre-transport headers that
		 * __skb_checksum_complete() will include when it sums the
		 * whole packet.
		 */
		skb->csum = ~csum_unfold(
				csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						skb->len - dataoff,
						protocol,
						csum_sub(0,
							 skb_checksum(skb, 0,
								      dataoff, 0))));
		csum = __skb_checksum_complete(skb);
	}
	return csum;
}
EXPORT_SYMBOL(nf_ip6_checksum);
|
static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
|
|
unsigned int dataoff, unsigned int len,
|
|
u8 protocol)
|
|
{
|
|
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
|
|
__wsum hsum;
|
|
__sum16 csum = 0;
|
|
|
|
switch (skb->ip_summed) {
|
|
case CHECKSUM_COMPLETE:
|
|
if (len == skb->len - dataoff)
|
|
return nf_ip6_checksum(skb, hook, dataoff, protocol);
|
|
fallthrough;
|
|
case CHECKSUM_NONE:
|
|
hsum = skb_checksum(skb, 0, dataoff, 0);
|
|
skb->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
|
|
&ip6h->daddr,
|
|
skb->len - dataoff,
|
|
protocol,
|
|
csum_sub(0, hsum)));
|
|
skb->ip_summed = CHECKSUM_NONE;
|
|
return __skb_checksum_complete_head(skb, dataoff + len);
|
|
}
|
|
return csum;
|
|
};
|
|
|
|
/*
 * nf_checksum - address-family dispatcher for transport checksum validation.
 * @skb:      packet to check
 * @hook:     netfilter hook the packet was seen on
 * @dataoff:  offset of the transport header
 * @protocol: transport protocol number
 * @family:   AF_INET or AF_INET6
 *
 * Returns 0 on success or for unknown families; non-zero folded checksum
 * on verification failure.
 */
__sum16 nf_checksum(struct sk_buff *skb, unsigned int hook,
		    unsigned int dataoff, u8 protocol,
		    unsigned short family)
{
	switch (family) {
	case AF_INET:
		return nf_ip_checksum(skb, hook, dataoff, protocol);
	case AF_INET6:
		return nf_ip6_checksum(skb, hook, dataoff, protocol);
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(nf_checksum);
|
__sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
|
|
unsigned int dataoff, unsigned int len,
|
|
u8 protocol, unsigned short family)
|
|
{
|
|
__sum16 csum = 0;
|
|
|
|
switch (family) {
|
|
case AF_INET:
|
|
csum = nf_ip_checksum_partial(skb, hook, dataoff, len,
|
|
protocol);
|
|
break;
|
|
case AF_INET6:
|
|
csum = nf_ip6_checksum_partial(skb, hook, dataoff, len,
|
|
protocol);
|
|
break;
|
|
}
|
|
|
|
return csum;
|
|
}
|
|
EXPORT_SYMBOL_GPL(nf_checksum_partial);
|
|
|
|
int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
|
|
bool strict, unsigned short family)
|
|
{
|
|
const struct nf_ipv6_ops *v6ops __maybe_unused;
|
|
int ret = 0;
|
|
|
|
switch (family) {
|
|
case AF_INET:
|
|
ret = nf_ip_route(net, dst, fl, strict);
|
|
break;
|
|
case AF_INET6:
|
|
ret = nf_ip6_route(net, dst, fl, strict);
|
|
break;
|
|
}
|
|
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL_GPL(nf_route);
|
|
|
|
/*
 * nf_ip_reroute - re-route a queued IPv4 packet if routing-relevant fields
 * were mangled while it sat in the queue.
 * @skb:   the packet being reinjected
 * @entry: queue entry holding the hook state and the pre-queue route info
 *
 * Only LOCAL_OUT packets are considered. Returns 0 when no reroute is
 * needed (or CONFIG_INET is off), otherwise the result of
 * ip_route_me_harder().
 */
static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry)
{
#ifdef CONFIG_INET
	const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->state.hook == NF_INET_LOCAL_OUT) {
		const struct iphdr *iph = ip_hdr(skb);

		/* Reroute only when TOS, mark, or an address changed since
		 * the packet was queued. Note entry->state.sk is used for
		 * routing: for tunnels it is the transmitting socket, which
		 * may differ from skb->sk.
		 */
		if (iph->tos != rt_info->tos ||
		    skb->mark != rt_info->mark ||
		    iph->daddr != rt_info->daddr ||
		    iph->saddr != rt_info->saddr)
			return ip_route_me_harder(entry->state.net,
						  entry->state.sk, skb,
						  RTN_UNSPEC);
	}
#endif
	return 0;
}
|
int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry)
|
|
{
|
|
const struct nf_ipv6_ops *v6ops;
|
|
int ret = 0;
|
|
|
|
switch (entry->state.pf) {
|
|
case AF_INET:
|
|
ret = nf_ip_reroute(skb, entry);
|
|
break;
|
|
case AF_INET6:
|
|
v6ops = rcu_dereference(nf_ipv6_ops);
|
|
if (v6ops)
|
|
ret = v6ops->reroute(skb, entry);
|
|
break;
|
|
}
|
|
return ret;
|
|
}
|