net: sched: protect filter_chain list with filter_chain_lock mutex

Extend tcf_chain with a new filter_chain_lock mutex. Always take this lock
when accessing the filter_chain list, instead of relying on the rtnl lock.
Dereference filter_chain with the tcf_chain_dereference() lockdep macro to
verify that all users of filter_chain have the lock taken.
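
For illustration, a minimal sketch of the locking pattern the macro
enforces (the walker function is hypothetical, not part of this patch):

static void example_walk_chain(struct tcf_chain *chain)
{
	struct tcf_proto *tp;

	/* hypothetical example: filter_chain may only be traversed with
	 * filter_chain_lock held, which satisfies the lockdep check
	 * inside tcf_chain_dereference()
	 */
	mutex_lock(&chain->filter_chain_lock);
	for (tp = tcf_chain_dereference(chain->filter_chain, chain);
	     tp; tp = tcf_chain_dereference(tp->next, chain))
		pr_debug("tp prio %u\n", tp->prio);
	mutex_unlock(&chain->filter_chain_lock);
}

With CONFIG_PROVE_LOCKING enabled, a tcf_chain_dereference() call made
without the lock held triggers an RCU lockdep splat.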

Rearrange the tp insert/remove code in tc_new_tfilter()/tc_del_tfilter() so
that all necessary code executes while holding the chain lock, in order to
prevent invalidation of the chain_info structure by a potential concurrent
change. This also serializes calls to tcf_chain0_head_change(), which allows
head-change callbacks to rely on filter_chain_lock for synchronization
instead of the rtnl mutex.
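
Conceptually, the lookup that fills chain_info and the list update that
consumes it now sit in a single critical section. A simplified sketch of
the insert path (tp creation and error handling elided; not the literal
patched code):

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol, prio,
			       prio_allocate);
	/* chain_info (prev pointer into filter_chain) cannot be
	 * invalidated here: concurrent inserts/removes block on
	 * filter_chain_lock
	 */
	tcf_chain_tp_insert(chain, &chain_info, tp);
	mutex_unlock(&chain->filter_chain_lock);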

Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h

@@ -341,6 +341,8 @@ struct qdisc_skb_cb {
 typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
 
 struct tcf_chain {
+	/* Protects filter_chain. */
+	struct mutex filter_chain_lock;
 	struct tcf_proto __rcu *filter_chain;
 	struct list_head list;
 	struct tcf_block *block;
@@ -374,6 +376,21 @@ struct tcf_block {
 	struct rcu_head rcu;
 };
 
+#ifdef CONFIG_PROVE_LOCKING
+static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
+{
+	return lockdep_is_held(&chain->filter_chain_lock);
+}
+#else
+static inline bool lockdep_tcf_chain_is_locked(struct tcf_block *chain)
+{
+	return true;
+}
+#endif /* #ifdef CONFIG_PROVE_LOCKING */
+
+#define tcf_chain_dereference(p, chain)					\
+	rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))
+
 static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
 {
 	if (*flags & TCA_CLS_FLAGS_IN_HW)
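
The cls_api.c part of the patch (not excerpted here) ties the mutex to the
chain's lifetime. A sketch of the presumed wiring in tcf_chain_create() in
net/sched/cls_api.c:

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	/* presumed: initialize the new lock before the chain is
	 * published on block->chain_list
	 */
	mutex_init(&chain->filter_chain_lock);
	list_add_tail(&chain->list, &block->chain_list);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	return chain;
}

with a matching mutex_destroy(&chain->filter_chain_lock) when the chain is
freed.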