netfilter: ipv6: avoid nf_iterate recursion
The previous patch changed nf_ct_frag6_gather() to morph the reassembled
skb with the previous one.

This means that the return value is always NULL or the skb argument, so
change it to an err value.

Instead of invoking NF_HOOK recursively with a threshold to skip the
already-called hooks, we can now just return NF_ACCEPT to move on to the
next hook, except for -EINPROGRESS (which means the skb has been queued
for reassembly), in which case we return NF_STOLEN.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
commit daaa7d647f
parent 029f7f3b87
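To illustrate the convention this patch establishes, here is a minimal sketch of a caller of the new int-returning nf_ct_frag6_gather(). The hook name and its simplified argument list are hypothetical, not part of this commit:

/* Minimal sketch, assuming the patched nf_ct_frag6_gather() below:
 * reassembly now happens in place on the passed skb, and the return
 * value tells the hook how to proceed.
 */
static unsigned int example_defrag_hook(struct net *net, struct sk_buff *skb,
                                        u32 user)
{
        int err = nf_ct_frag6_gather(net, skb, user);

        if (err == -EINPROGRESS)        /* skb queued, awaiting more fragments */
                return NF_STOLEN;

        /* 0: skb now holds the reassembled packet. Any other error:
         * let the packet move on to the next hook instead of recursing
         * via NF_HOOK_THRESH as before.
         */
        return NF_ACCEPT;
}

This mirrors the updated ipv6_defrag() hook in the diff below.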
--- a/include/net/netfilter/ipv6/nf_defrag_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
@@ -5,7 +5,7 @@ void nf_defrag_ipv6_enable(void);
 
 int nf_ct_frag6_init(void);
 void nf_ct_frag6_cleanup(void);
-struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user);
+int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user);
 
 struct inet_frags_ctl;
 
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -361,14 +361,15 @@ err:
 
 /*
  * Check if this packet is complete.
- * Returns NULL on failure by any reason, and pointer
- * to current nexthdr field in reassembled frame.
  *
  * It is called with locked fq, and caller must check that
  * queue is eligible for reassembly i.e. it is not COMPLETE,
  * the last and the first frames arrived and all the bits are here.
+ *
+ * returns true if *prev skb has been transformed into the reassembled
+ * skb, false otherwise.
  */
-static struct sk_buff *
+static bool
 nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev)
 {
        struct sk_buff *fp, *head = fq->q.fragments;
@@ -382,22 +383,21 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev)
 
        ecn = ip_frag_ecn_table[fq->ecn];
        if (unlikely(ecn == 0xff))
-               goto out_fail;
+               return false;
 
        /* Unfragmented part is taken from the first segment. */
        payload_len = ((head->data - skb_network_header(head)) -
                       sizeof(struct ipv6hdr) + fq->q.len -
                       sizeof(struct frag_hdr));
        if (payload_len > IPV6_MAXPLEN) {
-               pr_debug("payload len is too large.\n");
-               goto out_oversize;
+               net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
+                                   payload_len);
+               return false;
        }
 
        /* Head of list must not be cloned. */
-       if (skb_unclone(head, GFP_ATOMIC)) {
-               pr_debug("skb is cloned but can't expand head");
-               goto out_oom;
-       }
+       if (skb_unclone(head, GFP_ATOMIC))
+               return false;
 
        /* If the first fragment is fragmented itself, we split
         * it to two chunks: the first with data and paged part
@@ -408,7 +408,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev)
 
                clone = alloc_skb(0, GFP_ATOMIC);
                if (clone == NULL)
-                       goto out_oom;
+                       return false;
 
                clone->next = head->next;
                head->next = clone;
@@ -438,7 +438,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev)
 
                fp = skb_clone(prev, GFP_ATOMIC);
                if (!fp)
-                       goto out_oom;
+                       return false;
 
                fp->next = prev->next;
                skb_queue_walk(head, iter) {
@@ -494,16 +494,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev)
        fq->q.fragments = NULL;
        fq->q.fragments_tail = NULL;
 
-       return head;
-
-out_oversize:
-       net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
-                           payload_len);
-       goto out_fail;
-out_oom:
-       net_dbg_ratelimited("nf_ct_frag6_reasm: no memory for reassembly\n");
-out_fail:
-       return NULL;
+       return true;
 }
 
 /*
@@ -569,27 +560,26 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
        return 0;
 }
 
-struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
+int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
 {
        struct net_device *dev = skb->dev;
+       int fhoff, nhoff, ret;
        struct frag_hdr *fhdr;
        struct frag_queue *fq;
        struct ipv6hdr *hdr;
-       int fhoff, nhoff;
        u8 prevhdr;
-       struct sk_buff *ret_skb = NULL;
 
        /* Jumbo payload inhibits frag. header */
        if (ipv6_hdr(skb)->payload_len == 0) {
                pr_debug("payload len = 0\n");
-               return skb;
+               return -EINVAL;
        }
 
        if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
-               return skb;
+               return -EINVAL;
 
        if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr)))
-               return skb;
+               return -ENOMEM;
 
        skb_set_transport_header(skb, fhoff);
        hdr = ipv6_hdr(skb);
@@ -598,27 +588,28 @@ struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
        fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
                     ip6_frag_ecn(hdr));
        if (fq == NULL)
-               return skb;
+               return -ENOMEM;
 
        spin_lock_bh(&fq->q.lock);
 
        if (nf_ct_frag6_queue(fq, skb, fhdr, nhoff) < 0) {
-               spin_unlock_bh(&fq->q.lock);
-               pr_debug("Can't insert skb to queue\n");
-               inet_frag_put(&fq->q, &nf_frags);
-               return skb;
+               ret = -EINVAL;
+               goto out_unlock;
        }
 
+       /* after queue has assumed skb ownership, only 0 or -EINPROGRESS
+        * must be returned.
+        */
+       ret = -EINPROGRESS;
        if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
-           fq->q.meat == fq->q.len) {
-               ret_skb = nf_ct_frag6_reasm(fq, skb, dev);
-               if (ret_skb == NULL)
-                       pr_debug("Can't reassemble fragmented packets\n");
-       }
-       spin_unlock_bh(&fq->q.lock);
+           fq->q.meat == fq->q.len &&
+           nf_ct_frag6_reasm(fq, skb, dev))
+               ret = 0;
 
+out_unlock:
+       spin_unlock_bh(&fq->q.lock);
        inet_frag_put(&fq->q, &nf_frags);
-       return ret_skb;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(nf_ct_frag6_gather);
 
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -55,7 +55,7 @@ static unsigned int ipv6_defrag(void *priv,
                                struct sk_buff *skb,
                                const struct nf_hook_state *state)
 {
-       struct sk_buff *reasm;
+       int err;
 
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
        /* Previously seen (loopback)? */
@@ -63,17 +63,13 @@ static unsigned int ipv6_defrag(void *priv,
                return NF_ACCEPT;
 #endif
 
-       reasm = nf_ct_frag6_gather(state->net, skb,
-                                  nf_ct6_defrag_user(state->hook, skb));
+       err = nf_ct_frag6_gather(state->net, skb,
+                                nf_ct6_defrag_user(state->hook, skb));
        /* queued */
-       if (reasm == NULL)
+       if (err == -EINPROGRESS)
                return NF_STOLEN;
 
-       NF_HOOK_THRESH(NFPROTO_IPV6, state->hook, state->net, state->sk, reasm,
-                      state->in, state->out,
-                      state->okfn, NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
-
-       return NF_STOLEN;
+       return NF_ACCEPT;
 }
 
 static struct nf_hook_ops ipv6_defrag_ops[] = {
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -300,10 +300,10 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
                            u16 zone, struct sk_buff *skb)
 {
        struct ovs_skb_cb ovs_cb = *OVS_CB(skb);
+       int err;
 
        if (key->eth.type == htons(ETH_P_IP)) {
                enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
-               int err;
 
                memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
                err = ip_defrag(net, skb, user);
@@ -314,14 +314,13 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
        } else if (key->eth.type == htons(ETH_P_IPV6)) {
                enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
-               struct sk_buff *reasm;
 
                memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
-               reasm = nf_ct_frag6_gather(net, skb, user);
-               if (!reasm)
-                       return -EINPROGRESS;
+               err = nf_ct_frag6_gather(net, skb, user);
+               if (err)
+                       return err;
 
-               key->ip.proto = ipv6_hdr(reasm)->nexthdr;
+               key->ip.proto = ipv6_hdr(skb)->nexthdr;
                ovs_cb.mru = IP6CB(skb)->frag_max_size;
 #endif
        } else {