mlxsw: use intermediate representation for matchall offload

Update the Mellanox Spectrum driver to use the newer intermediate
representation for flow actions in matchall offloads.

Signed-off-by: Pieter Jansen van Vuuren <pieter.jansenvanvuuren@netronome.com>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit ab79af32b0 (parent f00cbf1968)
Author:    Pieter Jansen van Vuuren
Date:      2019-05-04 04:46:18 -07:00
Committer: David S. Miller

2 changed files with 30 additions and 19 deletions
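
The diff below converts the driver from the tc_action helpers (tcf_mirred_dev(), tcf_sample_rate(), and friends) to the flow_action intermediate representation. As a quick orientation, here is a minimal sketch of how a driver consumes that IR. It is illustrative only and not part of this commit: example_parse_matchall() is a hypothetical name, while flow_offload_has_one_action(), the flow_action_entry fields (id, dev, sample.rate) and the FLOW_ACTION_MIRRED / FLOW_ACTION_SAMPLE action IDs are the ones that appear in the diff.

/* Illustrative sketch only; example_parse_matchall() is a hypothetical
 * helper, not part of this commit. It shows how a driver dispatches on
 * the flow_action IR instead of calling tc_action accessors such as
 * tcf_mirred_dev() or tcf_sample_rate() directly.
 */
#include <linux/errno.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>

static int example_parse_matchall(struct tc_cls_matchall_offload *f)
{
        struct flow_action_entry *act;

        /* matchall offload here supports exactly one action per rule */
        if (!flow_offload_has_one_action(&f->rule->action))
                return -EOPNOTSUPP;

        act = &f->rule->action.entries[0];
        switch (act->id) {
        case FLOW_ACTION_MIRRED:
                /* act->dev replaces tcf_mirred_dev(a) */
                return act->dev ? 0 : -EINVAL;
        case FLOW_ACTION_SAMPLE:
                /* act->sample.rate etc. replace the tcf_sample getters */
                return act->sample.rate ? 0 : -EOPNOTSUPP;
        default:
                return -EOPNOTSUPP;
        }
}

The real driver additionally programs the mirror session or the sampler; the sketch stops at validating the single action.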

drivers/net/ethernet/mellanox/mlxsw/spectrum.c

@@ -1269,21 +1269,19 @@ mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
 static int
 mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
                                       struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
-                                      const struct tc_action *a,
+                                      const struct flow_action_entry *act,
                                       bool ingress)
 {
         enum mlxsw_sp_span_type span_type;
-        struct net_device *to_dev;
 
-        to_dev = tcf_mirred_dev(a);
-        if (!to_dev) {
+        if (!act->dev) {
                 netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
                 return -EINVAL;
         }
 
         mirror->ingress = ingress;
         span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
-        return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_dev, span_type,
+        return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type,
                                         true, &mirror->span_id);
 }
@@ -1302,7 +1300,7 @@ mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
 static int
 mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
                                       struct tc_cls_matchall_offload *cls,
-                                      const struct tc_action *a,
+                                      const struct flow_action_entry *act,
                                       bool ingress)
 {
         int err;
@@ -1313,18 +1311,18 @@ mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
                 netdev_err(mlxsw_sp_port->dev, "sample already active\n");
                 return -EEXIST;
         }
-        if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
+        if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
                 netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
                 return -EOPNOTSUPP;
         }
 
         rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
-                           tcf_sample_psample_group(a));
-        mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
-        mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
-        mlxsw_sp_port->sample->rate = tcf_sample_rate(a);
+                           act->sample.psample_group);
+        mlxsw_sp_port->sample->truncate = act->sample.truncate;
+        mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size;
+        mlxsw_sp_port->sample->rate = act->sample.rate;
 
-        err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
+        err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate);
         if (err)
                 goto err_port_sample_set;
         return 0;
@@ -1350,10 +1348,10 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
 {
         struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
         __be16 protocol = f->common.protocol;
-        const struct tc_action *a;
+        struct flow_action_entry *act;
         int err;
 
-        if (!tcf_exts_has_one_action(f->exts)) {
+        if (!flow_offload_has_one_action(&f->rule->action)) {
                 netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
                 return -EOPNOTSUPP;
         }
@@ -1363,19 +1361,21 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
                 return -ENOMEM;
         mall_tc_entry->cookie = f->cookie;
 
-        a = tcf_exts_first_action(f->exts);
-        if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
+        act = &f->rule->action.entries[0];
+        if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
                 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
 
                 mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
                 mirror = &mall_tc_entry->mirror;
                 err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
-                                                            mirror, a, ingress);
-        } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
+                                                            mirror, act,
+                                                            ingress);
+        } else if (act->id == FLOW_ACTION_SAMPLE &&
+                   protocol == htons(ETH_P_ALL)) {
                 mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
                 err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
-                                                            a, ingress);
+                                                            act, ingress);
         } else {
                 err = -EOPNOTSUPP;
         }

include/net/flow_offload.h

@@ -177,6 +177,17 @@ static inline bool flow_action_has_entries(const struct flow_action *action)
         return action->num_entries;
 }
 
+/**
+ * flow_offload_has_one_action() - check if exactly one action is present
+ * @action: tc filter flow offload action
+ *
+ * Returns true if exactly one action is present.
+ */
+static inline bool flow_offload_has_one_action(const struct flow_action *action)
+{
+        return action->num_entries == 1;
+}
+
 #define flow_action_for_each(__i, __act, __actions)                    \
         for (__i = 0, __act = &(__actions)->entries[0]; __i < (__actions)->num_entries; __act = &(__actions)->entries[++__i])
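
For completeness, a minimal usage sketch of the new helper alongside the existing flow_action_for_each() iterator. It is illustrative only and not part of the commit: example_validate_actions() is a hypothetical name, and the "mirror or sample only" policy simply mirrors the mlxsw restriction above.

/* Illustrative sketch; example_validate_actions() is hypothetical and only
 * demonstrates how flow_offload_has_one_action() and flow_action_for_each()
 * fit together for hardware that offloads a single action per rule.
 */
#include <linux/errno.h>
#include <net/flow_offload.h>

static int example_validate_actions(const struct flow_action *action)
{
        const struct flow_action_entry *act;
        int i;

        /* reject multi-action rules up front */
        if (!flow_offload_has_one_action(action))
                return -EOPNOTSUPP;

        /* then inspect the single remaining entry */
        flow_action_for_each(i, act, action) {
                if (act->id != FLOW_ACTION_MIRRED &&
                    act->id != FLOW_ACTION_SAMPLE)
                        return -EOPNOTSUPP;
        }

        return 0;
}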