net/mlx5: E-switch, add drop rule support to ingress ACL

Support inserting an ingress ACL drop rule on the uplink in
switchdev mode. This will be used by downstream patches to offload
active-backup LAG mode. The drop rule (if created) is the first rule
in the ACL.

Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
commit 1749c4c51c (parent 82e86a6c71)
Author: Mark Bloch <mbloch@nvidia.com>
Date: 2021-11-29 09:23:51 +0000
Committer: Saeed Mahameed
3 changed files with 105 additions and 0 deletions
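
For context, here is a minimal usage sketch (not part of this patch) of the
two helpers the commit adds. The wrapper name and the active/backup trigger
are hypothetical; the commit only states that downstream patches use the
drop rule to offload active-backup LAG mode.

/* Hypothetical caller: toggle the uplink drop rule as a bond port
 * flips between active and backup. Only the two
 * mlx5_esw_acl_ingress_vport_drop_rule_* helpers are from this patch.
 */
static int hypothetical_lag_update_port_state(struct mlx5_eswitch *esw,
                                              bool port_is_active)
{
        if (port_is_active) {
                /* Active port: remove the drop rule so ingress traffic
                 * flows again. Safe to call even if no rule was created.
                 */
                mlx5_esw_acl_ingress_vport_drop_rule_destroy(esw, MLX5_VPORT_UPLINK);
                return 0;
        }

        /* Backup port: silence all ingress traffic on the uplink. */
        return mlx5_esw_acl_ingress_vport_drop_rule_create(esw, MLX5_VPORT_UPLINK);
}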

drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c

@@ -92,6 +92,7 @@ static int esw_acl_ingress_mod_metadata_create(struct mlx5_eswitch *esw,
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
        flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
        flow_act.fg = vport->ingress.offloads.metadata_allmatch_grp;
        vport->ingress.offloads.modify_metadata_rule =
                                mlx5_add_flow_rules(vport->ingress.acl,
                                                    NULL, &flow_act, NULL, 0);
@@ -117,6 +118,36 @@ static void esw_acl_ingress_mod_metadata_destroy(struct mlx5_eswitch *esw,
        vport->ingress.offloads.modify_metadata_rule = NULL;
}

static int esw_acl_ingress_src_port_drop_create(struct mlx5_eswitch *esw,
                                                struct mlx5_vport *vport)
{
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_handle *flow_rule;
        int err = 0;

        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
        flow_act.fg = vport->ingress.offloads.drop_grp;
        flow_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0);
        if (IS_ERR(flow_rule)) {
                err = PTR_ERR(flow_rule);
                goto out;
        }

        vport->ingress.offloads.drop_rule = flow_rule;
out:
        return err;
}

static void esw_acl_ingress_src_port_drop_destroy(struct mlx5_eswitch *esw,
                                                  struct mlx5_vport *vport)
{
        if (!vport->ingress.offloads.drop_rule)
                return;

        mlx5_del_flow_rules(vport->ingress.offloads.drop_rule);
        vport->ingress.offloads.drop_rule = NULL;
}

static int esw_acl_ingress_ofld_rules_create(struct mlx5_eswitch *esw,
                                             struct mlx5_vport *vport)
{

@@ -154,6 +185,7 @@ static void esw_acl_ingress_ofld_rules_destroy(struct mlx5_eswitch *esw,
{
        esw_acl_ingress_allow_rule_destroy(vport);
        esw_acl_ingress_mod_metadata_destroy(esw, vport);
        esw_acl_ingress_src_port_drop_destroy(esw, vport);
}

static int esw_acl_ingress_ofld_groups_create(struct mlx5_eswitch *esw,

@@ -170,10 +202,29 @@ static int esw_acl_ingress_ofld_groups_create(struct mlx5_eswitch *esw,
        if (!flow_group_in)
                return -ENOMEM;

        if (vport->vport == MLX5_VPORT_UPLINK) {
                /* This group can hold an FTE to drop all traffic.
                 * Needed in case LAG is enabled.
                 */
                MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
                MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

                g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
                if (IS_ERR(g)) {
                        ret = PTR_ERR(g);
                        esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
                                 vport->vport, ret);
                        goto drop_err;
                }
                vport->ingress.offloads.drop_grp = g;
                flow_index++;
        }

        if (esw_acl_ingress_prio_tag_enabled(esw, vport)) {
                /* This group holds an FTE to match untagged packets when
                 * prio_tag is enabled.
                 */
                memset(flow_group_in, 0, inlen);
                match_criteria = MLX5_ADDR_OF(create_flow_group_in,
                                              flow_group_in, match_criteria);
                MLX5_SET(create_flow_group_in, flow_group_in,
@@ -221,6 +272,11 @@ metadata_err:
                vport->ingress.offloads.metadata_prio_tag_grp = NULL;
        }
prio_tag_err:
        if (!IS_ERR_OR_NULL(vport->ingress.offloads.drop_grp)) {
                mlx5_destroy_flow_group(vport->ingress.offloads.drop_grp);
                vport->ingress.offloads.drop_grp = NULL;
        }
drop_err:
        kvfree(flow_group_in);
        return ret;
}
@@ -236,6 +292,11 @@ static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport)
                mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
                vport->ingress.offloads.metadata_prio_tag_grp = NULL;
        }

        if (vport->ingress.offloads.drop_grp) {
                mlx5_destroy_flow_group(vport->ingress.offloads.drop_grp);
                vport->ingress.offloads.drop_grp = NULL;
        }
}

int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,

@@ -252,6 +313,8 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
        if (mlx5_eswitch_vport_match_metadata_enabled(esw))
                num_ftes++;
        if (vport->vport == MLX5_VPORT_UPLINK)
                num_ftes++;
        if (esw_acl_ingress_prio_tag_enabled(esw, vport))
                num_ftes++;
@@ -320,3 +383,27 @@ out:
        vport->metadata = vport->default_metadata;
        return err;
}

int mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw, u16 vport_num)
{
        struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

        if (IS_ERR(vport)) {
                esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
                return PTR_ERR(vport);
        }

        return esw_acl_ingress_src_port_drop_create(esw, vport);
}

void mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw, u16 vport_num)
{
        struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

        if (WARN_ON_ONCE(IS_ERR(vport))) {
                esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
                return;
        }

        esw_acl_ingress_src_port_drop_destroy(esw, vport);
}
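
To make the "first rule in the ACL" claim concrete, here is a sketch of the
uplink ingress ACL layout implied by the group-creation order above (the
group names are from this patch; the summary itself is editorial):

/* Uplink ingress ACL after this patch (sketch):
 *
 *   flow index 0: drop_grp              - holds the optional catch-all
 *                                         drop FTE; evaluated first
 *   next index:   metadata_prio_tag_grp - untagged-packet FTE, only when
 *                                         prio-tag is enabled
 *   last index:   metadata_allmatch_grp - match-all modify-metadata/allow FTE
 *
 * The drop group both starts and ends at flow index 0, so any FTE placed
 * in it precedes every other rule in the table.
 */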

drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h

@@ -6,6 +6,7 @@
#include "eswitch.h"

#ifdef CONFIG_MLX5_ESWITCH
/* Eswitch acl egress external APIs */
int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void esw_acl_egress_ofld_cleanup(struct mlx5_vport *vport);
@@ -25,5 +26,19 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
int mlx5_esw_acl_ingress_vport_bond_update(struct mlx5_eswitch *esw, u16 vport_num,
                                           u32 metadata);
void mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw, u16 vport_num);

#else /* CONFIG_MLX5_ESWITCH */
static inline void
mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw,
                                             u16 vport_num)
{}

static inline int
mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw,
                                            u16 vport_num)
{
        return 0;
}
#endif /* CONFIG_MLX5_ESWITCH */

#endif /* __MLX5_ESWITCH_ACL_OFLD_H__ */
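
The #else stubs let code outside the eswitch core call these helpers without
wrapping every call site in CONFIG_MLX5_ESWITCH guards. A sketch of such a
call site (the function name is hypothetical):

/* Builds in both configurations: with CONFIG_MLX5_ESWITCH=n the create
 * stub returns 0 and the destroy stub is a no-op.
 */
static int hypothetical_silence_port(struct mlx5_eswitch *esw, u16 vport_num)
{
        return mlx5_esw_acl_ingress_vport_drop_rule_create(esw, vport_num);
}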

drivers/net/ethernet/mellanox/mlx5/core/eswitch.h

@@ -113,8 +113,11 @@ struct vport_ingress {
                 * packet with metadata.
                 */
                struct mlx5_flow_group *metadata_allmatch_grp;
                /* Optional group to add a drop-all rule */
                struct mlx5_flow_group *drop_grp;
                struct mlx5_modify_hdr *modify_metadata;
                struct mlx5_flow_handle *modify_metadata_rule;
                struct mlx5_flow_handle *drop_rule;
        } offloads;
};