net: sched: flower: fix filter net reference counting

Fix net reference counting in fl_change() and remove the redundant call to
tcf_exts_get_net() from __fl_delete(). __fl_put() already tries to get net
before releasing exts and deallocating a filter, so this code caused the
flower classifier to obtain net twice per filter being deleted.
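
For reference, a minimal sketch of __fl_put() as it looked around this change
(paraphrased, not the verbatim kernel source) shows where the netns reference
is already taken before filter destruction is deferred:

    /* Sketch only: the netns is pinned here, right before the destroy
     * work is queued, which is why an extra tcf_exts_get_net() in the
     * callers results in a double get.
     */
    static void __fl_put(struct cls_fl_filter *f)
    {
            if (!refcount_dec_and_test(&f->refcnt))
                    return;

            if (tcf_exts_get_net(&f->exts))
                    /* netns held; tear down filter and its actions from a workqueue */
                    tcf_queue_work(&f->rwork, fl_destroy_filter_work);
            else
                    /* netns already gone; no deferred action cleanup needed */
                    __fl_destroy_filter(f);
    }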

The implementation of __fl_delete() called tcf_exts_get_net() to pass its
result as the 'async' flag to fl_mask_put(). However, the 'async' flag is
redundant and only complicates the fl_mask_put() implementation. This
functionality seems to have been copied from the filter cleanup code, where
it was added by Cong Wang with the following explanation:

    This patchset tries to fix the race between call_rcu() and
    cleanup_net() again. Without holding the netns refcnt the
    tc_action_net_exit() in netns workqueue could be called before
    filter destroy works in tc filter workqueue. This patchset
    moves the netns refcnt from tc actions to tcf_exts, without
    breaking per-netns tc actions.

This doesn't apply to the flower mask, which doesn't call any tc action code
during cleanup. Simplify fl_mask_put() by removing the flag parameter and
always using tcf_queue_work() to free mask objects.
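
For context, the netns pinning in question is done by the tcf_exts helpers,
roughly as below (paraphrased from include/net/pkt_cls.h; details may differ
between kernel versions). A mask carries no tcf_exts and therefore no actions,
so nothing in its deferred free path needs the netns to stay alive, and
fl_mask_put() can queue the free work unconditionally:

    /* Rough sketch of the helpers, for illustration only */
    static inline bool tcf_exts_get_net(struct tcf_exts *exts)
    {
    #ifdef CONFIG_NET_CLS_ACT
            exts->net = maybe_get_net(exts->net);
            return exts->net != NULL;
    #else
            return true;
    #endif
    }

    static inline void tcf_exts_put_net(struct tcf_exts *exts)
    {
    #ifdef CONFIG_NET_CLS_ACT
            if (exts->net)
                    put_net(exts->net);
    #endif
    }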

Fixes: 061775583e ("net: sched: flower: introduce reference counting for filters")
Fixes: 1f17f7742e ("net: sched: flower: insert filter to ht before offloading it to hw")
Fixes: 05cd271fd6 ("cls_flower: Support multiple masks per priority")
Reported-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 9994677c96 (parent 56490b623a)
Author:    Vlad Buslov <vladbu@mellanox.com>
Date:      2019-04-12 00:54:19 +03:00
Committer: David S. Miller <davem@davemloft.net>

net/sched/cls_flower.c

@@ -336,8 +336,7 @@ static void fl_mask_free_work(struct work_struct *work)
         fl_mask_free(mask);
 }
 
-static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask,
-                        bool async)
+static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
 {
         if (!refcount_dec_and_test(&mask->refcnt))
                 return false;
@@ -348,10 +347,7 @@ static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask,
         list_del_rcu(&mask->list);
         spin_unlock(&head->masks_lock);
 
-        if (async)
-                tcf_queue_work(&mask->rwork, fl_mask_free_work);
-        else
-                fl_mask_free(mask);
+        tcf_queue_work(&mask->rwork, fl_mask_free_work);
 
         return true;
 }
@@ -538,7 +534,6 @@ static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
                        struct netlink_ext_ack *extack)
 {
         struct cls_fl_head *head = fl_head_dereference(tp);
-        bool async = tcf_exts_get_net(&f->exts);
 
         *last = false;
 
@@ -555,7 +550,7 @@ static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
         list_del_rcu(&f->list);
         spin_unlock(&tp->lock);
 
-        *last = fl_mask_put(head, f->mask, async);
+        *last = fl_mask_put(head, f->mask);
         if (!tc_skip_hw(f->flags))
                 fl_hw_destroy_filter(tp, f, rtnl_held, extack);
         tcf_unbind_filter(tp, &f->res);
@@ -1605,11 +1600,10 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 
                 spin_unlock(&tp->lock);
 
-                fl_mask_put(head, fold->mask, true);
+                fl_mask_put(head, fold->mask);
                 if (!tc_skip_hw(fold->flags))
                         fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
                 tcf_unbind_filter(tp, &fold->res);
-                tcf_exts_get_net(&fold->exts);
                 /* Caller holds reference to fold, so refcnt is always > 0
                  * after this.
                  */
@@ -1657,8 +1651,9 @@ errout_ht:
         rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
                                fnew->mask->filter_ht_params);
 errout_mask:
-        fl_mask_put(head, fnew->mask, true);
+        fl_mask_put(head, fnew->mask);
 errout:
+        tcf_exts_get_net(&fnew->exts);
         tcf_queue_work(&fnew->rwork, fl_destroy_filter_work);
 errout_tb:
         kfree(tb);