netfilter: conntrack: remove unneeded nf_ct_put
We can delay refcount increment until we reassign the existing entry to the current skb. A 0 refcount can't happen while the nf_conn object is still in the hash table and parallel mutations are impossible because we hold the bucket lock.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent bc92470413
commit ff73e7479b
net/netfilter/nf_conntrack_core.c

@@ -908,6 +908,7 @@ static void __nf_conntrack_insert_prepare(struct nf_conn *ct)
 		tstamp->start = ktime_get_real_ns();
 }
 
+/* caller must hold locks to prevent concurrent changes */
 static int __nf_ct_resolve_clash(struct sk_buff *skb,
 				 struct nf_conntrack_tuple_hash *h)
 {
@@ -921,13 +922,12 @@ static int __nf_ct_resolve_clash(struct sk_buff *skb,
 	if (nf_ct_is_dying(ct))
 		return NF_DROP;
 
-	if (!atomic_inc_not_zero(&ct->ct_general.use))
-		return NF_DROP;
-
 	if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
 	    nf_ct_match(ct, loser_ct)) {
 		struct net *net = nf_ct_net(ct);
 
+		nf_conntrack_get(&ct->ct_general);
+
 		nf_ct_acct_merge(ct, ctinfo, loser_ct);
 		nf_ct_add_to_dying_list(loser_ct);
 		nf_conntrack_put(&loser_ct->ct_general);
@@ -937,7 +937,6 @@ static int __nf_ct_resolve_clash(struct sk_buff *skb,
 	return NF_ACCEPT;
 }
 
-	nf_ct_put(ct);
 	return NF_DROP;
 }
 
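To make the refcount reasoning from the commit message concrete, here is a minimal userspace sketch (plain C11 with <stdatomic.h>, not kernel code; struct obj, obj_get_unless_zero() and obj_get() are invented names used only for illustration). It contrasts the conditional atomic_inc_not_zero-style get, which the old code needed in case the count had already dropped to zero, with an unconditional get that is safe only when the caller already knows the object is alive, which is the case here because the entry is still in the hash table and the bucket lock excludes concurrent removal.

/* Minimal userspace analogy, NOT the kernel implementation: struct obj,
 * obj_get_unless_zero() and obj_get() are invented names used only to
 * illustrate the two refcounting patterns discussed in the commit message. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int use;		/* reference count */
};

/* Conditional get, the pattern the old code used: required when the count
 * may already have reached zero because a concurrent free is possible.
 * This mirrors atomic_inc_not_zero(). */
static bool obj_get_unless_zero(struct obj *o)
{
	int old = atomic_load(&o->use);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&o->use, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;			/* object already dying */
}

/* Unconditional get, the pattern the patch switches to: only valid when the
 * caller already knows the object is alive, e.g. it is still reachable from
 * a table whose bucket lock the caller holds.  Mirrors nf_conntrack_get(). */
static void obj_get(struct obj *o)
{
	atomic_fetch_add(&o->use, 1);
}

int main(void)
{
	struct obj winner = { .use = 1 };	/* "still in the hash table" */

	/* Old shape: take a reference up front, then drop it again on the
	 * failure path (the nf_ct_put() the patch removes). */
	if (obj_get_unless_zero(&winner))
		atomic_fetch_sub(&winner.use, 1);	/* put on the DROP path */

	/* New shape: take the reference only on the path that keeps the
	 * object for the skb; the early-return paths need no put at all. */
	obj_get(&winner);

	printf("refcount now %d\n", atomic_load(&winner.use));
	return 0;
}

The point of the change is the second shape: because the entry cannot reach a zero refcount while it sits in the hash table under the bucket lock, the increment can be deferred to the single path that actually assigns the entry to the skb, and the balancing nf_ct_put() on the drop path disappears.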