Merge branch 'net-sched-act_skbedit-lockless-data-path'
Davide Caratti says:
====================
net/sched: act_skbedit: lockless data path
the data path of act_skbedit can be faster if we avoid using spinlocks:
- patch 1 converts act_skbedit statistics to use per-cpu counters
- patch 2 lets act_skbedit use RCU to read/update its configuration
test procedure (using pktgen from https://github.com/netoptimizer):
# ip link add name eth1 type dummy
# ip link set dev eth1 up
# tc qdisc add dev eth1 clsact
# tc filter add dev eth1 egress matchall action skbedit priority c1a0:c1a0
# for c in 1 2 4 ; do
> ./pktgen_bench_xmit_mode_queue_xmit.sh -v -s 64 -t $c -n 5000000 -i eth1
> done
test results (avg. pps/thread)
 $c | before patch | after patch  | improvement
----+--------------+--------------+------------
  1 | 3917464 ± 3% | 4000458 ± 3% | irrelevant
  2 | 3455367 ± 4% | 3953076 ± 1% | +14%
  4 | 2496594 ± 2% | 3801123 ± 3% | +52%
v2: rebased on latest net-next
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
commit ccb06fba51
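Before the diff itself, here is a minimal sketch of the pattern patch 2 applies (all demo_* names are hypothetical; this is not the kernel code, which follows below): readers fetch an immutable parameter block under rcu_read_lock(), while the control path, which already runs under RTNL, publishes a replacement with rcu_assign_pointer() and defers freeing the old block with kfree_rcu().

/* Minimal sketch of the RCU parameter-swap pattern (not the actual patch).
 * All demo_* names are hypothetical.
 */
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_params {
	u32 priority;
	struct rcu_head rcu;
};

struct demo_action {
	struct demo_params __rcu *params;
};

/* data path: lockless read of the current configuration */
static u32 demo_get_priority(struct demo_action *act)
{
	u32 prio;

	rcu_read_lock();
	prio = rcu_dereference(act->params)->priority;
	rcu_read_unlock();

	return prio;
}

/* control path (RTNL held): publish a new configuration block */
static int demo_set_priority(struct demo_action *act, u32 prio)
{
	struct demo_params *params_old, *params_new;

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (!params_new)
		return -ENOMEM;

	params_new->priority = prio;
	params_old = rtnl_dereference(act->params);
	rcu_assign_pointer(act->params, params_new);
	if (params_old)
		kfree_rcu(params_old, rcu);	/* freed only after all readers finish */

	return 0;
}

Patch 1 is orthogonal: it switches the byte/packet and drop counters to the per-CPU variants (bstats_cpu_update(), qstats_drop_inc()), so the hot path no longer takes tcf_lock just to update statistics.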
--- a/include/net/tc_act/tc_skbedit.h
+++ b/include/net/tc_act/tc_skbedit.h
@@ -22,14 +22,19 @@
 #include <net/act_api.h>
 #include <linux/tc_act/tc_skbedit.h>
 
+struct tcf_skbedit_params {
+	u32 flags;
+	u32 priority;
+	u32 mark;
+	u32 mask;
+	u16 queue_mapping;
+	u16 ptype;
+	struct rcu_head rcu;
+};
+
 struct tcf_skbedit {
 	struct tc_action common;
-	u32 flags;
-	u32 priority;
-	u32 mark;
-	u32 mask;
-	u16 queue_mapping;
-	u16 ptype;
+	struct tcf_skbedit_params __rcu *params;
 };
 #define to_skbedit(a) ((struct tcf_skbedit *)a)
 
@@ -37,15 +42,27 @@ struct tcf_skbedit {
 static inline bool is_tcf_skbedit_mark(const struct tc_action *a)
 {
 #ifdef CONFIG_NET_CLS_ACT
-	if (a->ops && a->ops->type == TCA_ACT_SKBEDIT)
-		return to_skbedit(a)->flags == SKBEDIT_F_MARK;
+	u32 flags;
+
+	if (a->ops && a->ops->type == TCA_ACT_SKBEDIT) {
+		rcu_read_lock();
+		flags = rcu_dereference(to_skbedit(a)->params)->flags;
+		rcu_read_unlock();
+		return flags == SKBEDIT_F_MARK;
+	}
 #endif
 	return false;
 }
 
 static inline u32 tcf_skbedit_mark(const struct tc_action *a)
 {
-	return to_skbedit(a)->mark;
+	u32 mark;
+
+	rcu_read_lock();
+	mark = rcu_dereference(to_skbedit(a)->params)->mark;
+	rcu_read_unlock();
+
+	return mark;
 }
 
 #endif /* __NET_TC_SKBEDIT_H */
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -37,14 +37,19 @@ static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
 		       struct tcf_result *res)
 {
 	struct tcf_skbedit *d = to_skbedit(a);
+	struct tcf_skbedit_params *params;
+	int action;
 
-	spin_lock(&d->tcf_lock);
 	tcf_lastuse_update(&d->tcf_tm);
-	bstats_update(&d->tcf_bstats, skb);
+	bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);
 
-	if (d->flags & SKBEDIT_F_PRIORITY)
-		skb->priority = d->priority;
-	if (d->flags & SKBEDIT_F_INHERITDSFIELD) {
+	rcu_read_lock();
+	params = rcu_dereference(d->params);
+	action = READ_ONCE(d->tcf_action);
+
+	if (params->flags & SKBEDIT_F_PRIORITY)
+		skb->priority = params->priority;
+	if (params->flags & SKBEDIT_F_INHERITDSFIELD) {
 		int wlen = skb_network_offset(skb);
 
 		switch (tc_skb_protocol(skb)) {
@@ -63,23 +68,23 @@ static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
 			break;
 		}
 	}
-	if (d->flags & SKBEDIT_F_QUEUE_MAPPING &&
-	    skb->dev->real_num_tx_queues > d->queue_mapping)
-		skb_set_queue_mapping(skb, d->queue_mapping);
-	if (d->flags & SKBEDIT_F_MARK) {
-		skb->mark &= ~d->mask;
-		skb->mark |= d->mark & d->mask;
+	if (params->flags & SKBEDIT_F_QUEUE_MAPPING &&
+	    skb->dev->real_num_tx_queues > params->queue_mapping)
+		skb_set_queue_mapping(skb, params->queue_mapping);
+	if (params->flags & SKBEDIT_F_MARK) {
+		skb->mark &= ~params->mask;
+		skb->mark |= params->mark & params->mask;
 	}
-	if (d->flags & SKBEDIT_F_PTYPE)
-		skb->pkt_type = d->ptype;
-
-	spin_unlock(&d->tcf_lock);
-	return d->tcf_action;
+	if (params->flags & SKBEDIT_F_PTYPE)
+		skb->pkt_type = params->ptype;
 
+unlock:
+	rcu_read_unlock();
+	return action;
 err:
-	d->tcf_qstats.drops++;
-	spin_unlock(&d->tcf_lock);
-	return TC_ACT_SHOT;
+	qstats_drop_inc(this_cpu_ptr(d->common.cpu_qstats));
+	action = TC_ACT_SHOT;
+	goto unlock;
 }
 
 static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
@@ -98,6 +103,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
 			    struct netlink_ext_ack *extack)
 {
 	struct tc_action_net *tn = net_generic(net, skbedit_net_id);
+	struct tcf_skbedit_params *params_old, *params_new;
 	struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
 	struct tc_skbedit *parm;
 	struct tcf_skbedit *d;
@@ -169,7 +175,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
 
 	if (!exists) {
 		ret = tcf_idr_create(tn, parm->index, est, a,
-				     &act_skbedit_ops, bind, false);
+				     &act_skbedit_ops, bind, true);
 		if (ret) {
 			tcf_idr_cleanup(tn, parm->index);
 			return ret;
@@ -185,25 +191,34 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
 		}
 	}
 
-	spin_lock_bh(&d->tcf_lock);
+	ASSERT_RTNL();
 
-	d->flags = flags;
+	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
+	if (unlikely(!params_new)) {
+		if (ret == ACT_P_CREATED)
+			tcf_idr_release(*a, bind);
+		return -ENOMEM;
+	}
+
+	params_new->flags = flags;
 	if (flags & SKBEDIT_F_PRIORITY)
-		d->priority = *priority;
+		params_new->priority = *priority;
 	if (flags & SKBEDIT_F_QUEUE_MAPPING)
-		d->queue_mapping = *queue_mapping;
+		params_new->queue_mapping = *queue_mapping;
 	if (flags & SKBEDIT_F_MARK)
-		d->mark = *mark;
+		params_new->mark = *mark;
 	if (flags & SKBEDIT_F_PTYPE)
-		d->ptype = *ptype;
+		params_new->ptype = *ptype;
 	/* default behaviour is to use all the bits */
-	d->mask = 0xffffffff;
+	params_new->mask = 0xffffffff;
 	if (flags & SKBEDIT_F_MASK)
-		d->mask = *mask;
+		params_new->mask = *mask;
 
 	d->tcf_action = parm->action;
-
-	spin_unlock_bh(&d->tcf_lock);
+	params_old = rtnl_dereference(d->params);
+	rcu_assign_pointer(d->params, params_new);
+	if (params_old)
+		kfree_rcu(params_old, rcu);
 
 	if (ret == ACT_P_CREATED)
 		tcf_idr_insert(tn, *a);
@@ -215,33 +230,36 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tcf_skbedit *d = to_skbedit(a);
+	struct tcf_skbedit_params *params;
 	struct tc_skbedit opt = {
 		.index   = d->tcf_index,
 		.refcnt  = refcount_read(&d->tcf_refcnt) - ref,
 		.bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
 		.action  = d->tcf_action,
 	};
-	struct tcf_t t;
 	u64 pure_flags = 0;
+	struct tcf_t t;
+
+	params = rtnl_dereference(d->params);
 
 	if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
 		goto nla_put_failure;
-	if ((d->flags & SKBEDIT_F_PRIORITY) &&
-	    nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, d->priority))
+	if ((params->flags & SKBEDIT_F_PRIORITY) &&
+	    nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, params->priority))
 		goto nla_put_failure;
-	if ((d->flags & SKBEDIT_F_QUEUE_MAPPING) &&
-	    nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, d->queue_mapping))
+	if ((params->flags & SKBEDIT_F_QUEUE_MAPPING) &&
+	    nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, params->queue_mapping))
 		goto nla_put_failure;
-	if ((d->flags & SKBEDIT_F_MARK) &&
-	    nla_put_u32(skb, TCA_SKBEDIT_MARK, d->mark))
+	if ((params->flags & SKBEDIT_F_MARK) &&
+	    nla_put_u32(skb, TCA_SKBEDIT_MARK, params->mark))
 		goto nla_put_failure;
-	if ((d->flags & SKBEDIT_F_PTYPE) &&
-	    nla_put_u16(skb, TCA_SKBEDIT_PTYPE, d->ptype))
+	if ((params->flags & SKBEDIT_F_PTYPE) &&
+	    nla_put_u16(skb, TCA_SKBEDIT_PTYPE, params->ptype))
 		goto nla_put_failure;
-	if ((d->flags & SKBEDIT_F_MASK) &&
-	    nla_put_u32(skb, TCA_SKBEDIT_MASK, d->mask))
+	if ((params->flags & SKBEDIT_F_MASK) &&
+	    nla_put_u32(skb, TCA_SKBEDIT_MASK, params->mask))
 		goto nla_put_failure;
-	if (d->flags & SKBEDIT_F_INHERITDSFIELD)
+	if (params->flags & SKBEDIT_F_INHERITDSFIELD)
 		pure_flags |= SKBEDIT_F_INHERITDSFIELD;
 	if (pure_flags != 0 &&
 	    nla_put(skb, TCA_SKBEDIT_FLAGS, sizeof(pure_flags), &pure_flags))
@@ -257,6 +275,16 @@ nla_put_failure:
 	return -1;
 }
 
+static void tcf_skbedit_cleanup(struct tc_action *a)
+{
+	struct tcf_skbedit *d = to_skbedit(a);
+	struct tcf_skbedit_params *params;
+
+	params = rcu_dereference_protected(d->params, 1);
+	if (params)
+		kfree_rcu(params, rcu);
+}
+
 static int tcf_skbedit_walker(struct net *net, struct sk_buff *skb,
 			      struct netlink_callback *cb, int type,
 			      const struct tc_action_ops *ops,
@@ -289,6 +317,7 @@ static struct tc_action_ops act_skbedit_ops = {
 	.act		=	tcf_skbedit,
 	.dump		=	tcf_skbedit_dump,
 	.init		=	tcf_skbedit_init,
+	.cleanup	=	tcf_skbedit_cleanup,
 	.walk		=	tcf_skbedit_walker,
 	.lookup		=	tcf_skbedit_search,
 	.delete		=	tcf_skbedit_delete,