mlx5-updates-2021-03-12

1) TC support for ICMP parameters
2) TC connection tracking with mirroring
3) A round of trivial fixups and cleanups
-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAmBL+V4ACgkQSD+KveBX
+j68tgf+Lzq6QkpGl4dpg87pjuKIwyykGVJ0NW/Yig+bNe9V3pbFCv785xymD16N
ALcLKNtxUovE6p+Gy6JgzgBN7V9ZsM6bn564487ycYIqewz9c2KIQneJdGEUUDgi
S8NkuCaLQst36Wl8gdiygApZmw8TAafOQYYzKOZ7HdZ+aRH+fIwbVABYreAYwjGa
2iz5yu1AEzG5/10bTEx69rQHY+309EPy4N4na/jo/tXRL3fWQSQz6hzEnoi3EjAU
YsktBB72eBX/NogqLIHpSzzjPgm5IFRQbNSHyL71TH9KEr0KVpzJd9VCbaic6goL
gQn0TqoE74X7r4ZPWP3AHcGOL9L5Rg==
=L2PO
-----END PGP SIGNATURE-----

Merge tag 'mlx5-updates-2021-03-12' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2021-03-12

1) TC support for ICMP parameters
2) TC connection tracking with mirroring
3) A round of trivial fixups and cleanups
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit c232f81b0a
David S. Miller <davem@davemloft.net>, 2021-03-12 16:36:07 -08:00
12 changed files with 107 additions and 55 deletions
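
For context, the user-visible side of items 1) and 2) is expressible with tc(8). A rough sketch of the kind of rules this series enables (illustrative only: device names eth0/eth1/eth2, chain numbers, and priorities are placeholders, not taken from the patches):

  # 1) Match on ICMP type/code (the new FLOW_DISSECTOR_KEY_ICMP handling,
  #    gated on the MLX5_FLEX_PROTO_ICMP/ICMPV6 capability bits):
  tc filter add dev eth0 ingress protocol ip prio 1 flower \
          ip_proto icmp type 8 code 0 \
          action drop

  # 2) Mirror a connection-tracked flow while also redirecting it (the
  #    "Mirroring goto chain rules isn't supported" restriction is removed):
  tc filter add dev eth0 ingress protocol ip prio 2 chain 0 flower \
          ct_state -trk action ct action goto chain 1
  tc filter add dev eth0 ingress protocol ip prio 2 chain 1 flower \
          ct_state +trk+est \
          action mirred egress mirror dev eth1 \
          action mirred egress redirect dev eth2

The offload plumbing for both features is what the __parse_cls_flower() and mlx5e_tc_offload_fdb_rules() hunks below implement.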


@@ -263,15 +263,15 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent)
 	return 0;
 }
 
-static void dump_buf(void *buf, int size, int data_only, int offset)
+static void dump_buf(void *buf, int size, int data_only, int offset, int idx)
 {
 	__be32 *p = buf;
 	int i;
 
 	for (i = 0; i < size; i += 16) {
-		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
-			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
-			 be32_to_cpu(p[3]));
+		pr_debug("cmd[%d]: %03x: %08x %08x %08x %08x\n", idx, offset,
+			 be32_to_cpu(p[0]), be32_to_cpu(p[1]),
+			 be32_to_cpu(p[2]), be32_to_cpu(p[3]));
 		p += 4;
 		offset += 16;
 	}
@@ -802,39 +802,41 @@ static void dump_command(struct mlx5_core_dev *dev,
 	int dump_len;
 	int i;
 
+	mlx5_core_dbg(dev, "cmd[%d]: start dump\n", ent->idx);
 	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));
 
 	if (data_only)
 		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
-				   "dump command data %s(0x%x) %s\n",
-				   mlx5_command_str(op), op,
+				   "cmd[%d]: dump command data %s(0x%x) %s\n",
+				   ent->idx, mlx5_command_str(op), op,
 				   input ? "INPUT" : "OUTPUT");
 	else
-		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
-			      mlx5_command_str(op), op,
+		mlx5_core_dbg(dev, "cmd[%d]: dump command %s(0x%x) %s\n",
+			      ent->idx, mlx5_command_str(op), op,
 			      input ? "INPUT" : "OUTPUT");
 
 	if (data_only) {
 		if (input) {
-			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
+			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset, ent->idx);
 			offset += sizeof(ent->lay->in);
 		} else {
-			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
+			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset, ent->idx);
 			offset += sizeof(ent->lay->out);
 		}
 	} else {
-		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
+		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset, ent->idx);
 		offset += sizeof(*ent->lay);
 	}
 
 	for (i = 0; i < n && next; i++) {
 		if (data_only) {
 			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
-			dump_buf(next->buf, dump_len, 1, offset);
+			dump_buf(next->buf, dump_len, 1, offset, ent->idx);
 			offset += MLX5_CMD_DATA_BLOCK_SIZE;
 		} else {
-			mlx5_core_dbg(dev, "command block:\n");
-			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
+			mlx5_core_dbg(dev, "cmd[%d]: command block:\n", ent->idx);
+			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset,
+				 ent->idx);
 			offset += sizeof(struct mlx5_cmd_prot_block);
 		}
 		next = next->next;
@@ -842,6 +844,8 @@ static void dump_command(struct mlx5_core_dev *dev,
 
 	if (data_only)
 		pr_debug("\n");
+
+	mlx5_core_dbg(dev, "cmd[%d]: end dump\n", ent->idx);
 }
 
 static u16 msg_to_opcode(struct mlx5_cmd_msg *in)


@@ -137,12 +137,12 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
 		 * unregistering devlink instance while holding devlink_mutext.
 		 * Hence, do not support reload.
 		 */
-		NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated\n");
+		NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated");
 		return -EOPNOTSUPP;
 	}
 
 	if (mlx5_lag_is_active(dev)) {
-		NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode\n");
+		NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode");
 		return -EOPNOTSUPP;
 	}
 


@@ -1797,6 +1797,10 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
 	ct_flow->post_ct_attr->prio = 0;
 	ct_flow->post_ct_attr->ft = ct_priv->post_ct;
 
+	/* Splits were handled before CT */
+	if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB)
+		ct_flow->post_ct_attr->esw_attr->split_count = 0;
+
 	ct_flow->post_ct_attr->inner_match_level = MLX5_MATCH_NONE;
 	ct_flow->post_ct_attr->outer_match_level = MLX5_MATCH_NONE;
 	ct_flow->post_ct_attr->action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP);


@@ -669,6 +669,7 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 				       get_cqe_opcode(cqe));
 			mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
 					     (struct mlx5_err_cqe *)cqe);
+			mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
 			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
 				queue_work(cq->priv->wq, &sq->recover_work);
 			break;


@@ -445,12 +445,16 @@ static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
 	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
 }
 
-static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
+static int mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
 {
-	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
+	u32 *indirection_rqt, rqn;
 	struct mlx5e_priv *priv = hp->func_priv;
 	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;
 
+	indirection_rqt = kzalloc(sz, GFP_KERNEL);
+	if (!indirection_rqt)
+		return -ENOMEM;
+
 	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
 				      hp->num_channels);
@@ -462,6 +466,9 @@ static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
 		rqn = hp->pair->rqn[ix];
 		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
 	}
+
+	kfree(indirection_rqt);
+	return 0;
 }
 
 static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
@@ -482,12 +489,15 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
 	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
 	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
 
-	mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
+	err = mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
+	if (err)
+		goto out;
 
 	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
 	if (!err)
 		hp->indir_rqt.enabled = true;
 
+out:
 	kvfree(in);
 	return err;
 }
@@ -1077,19 +1087,23 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
 	if (flow_flag_test(flow, CT)) {
 		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
 
-		return mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
+		rule = mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
 					       flow, spec, attr,
 					       mod_hdr_acts);
+	} else {
+		rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
 	}
 
-	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
 	if (IS_ERR(rule))
 		return rule;
 
 	if (attr->esw_attr->split_count) {
 		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
 		if (IS_ERR(flow->rule[1])) {
-			mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
+			if (flow_flag_test(flow, CT))
+				mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
+			else
+				mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
 			return flow->rule[1];
 		}
 	}
@@ -1947,6 +1961,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 				       misc_parameters);
 	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
 				    misc_parameters);
+	void *misc_c_3 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+				      misc_parameters_3);
+	void *misc_v_3 = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+				      misc_parameters_3);
 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
 	struct flow_dissector *dissector = rule->match.dissector;
 	u16 addr_type = 0;
@@ -1976,6 +1994,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 	      BIT(FLOW_DISSECTOR_KEY_CT) |
 	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
 	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+	      BIT(FLOW_DISSECTOR_KEY_ICMP) |
 	      BIT(FLOW_DISSECTOR_KEY_MPLS))) {
 		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
 		netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
@@ -2295,7 +2314,49 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 		if (match.mask->flags)
 			*match_level = MLX5_MATCH_L4;
 	}
 
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
+		struct flow_match_icmp match;
+		flow_rule_match_icmp(rule, &match);
+		switch (ip_proto) {
+		case IPPROTO_ICMP:
+			if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
+			      MLX5_FLEX_PROTO_ICMP))
+				return -EOPNOTSUPP;
+			MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type,
+				 match.mask->type);
+			MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type,
+				 match.key->type);
+			MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_code,
+				 match.mask->code);
+			MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_code,
+				 match.key->code);
+			break;
+		case IPPROTO_ICMPV6:
+			if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
+			      MLX5_FLEX_PROTO_ICMPV6))
+				return -EOPNOTSUPP;
+			MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type,
+				 match.mask->type);
+			MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type,
+				 match.key->type);
+			MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_code,
+				 match.mask->code);
+			MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_code,
+				 match.key->code);
+			break;
+		default:
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Code and type matching only with ICMP and ICMPv6");
+			netdev_err(priv->netdev,
+				   "Code and type matching only with ICMP and ICMPv6\n");
+			return -EINVAL;
+		}
+		if (match.mask->code || match.mask->type) {
+			*match_level = MLX5_MATCH_L4;
+			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
+		}
+	}
 	return 0;
 }
@@ -2979,7 +3040,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
 	actions = flow->attr->action;
 
 	if (mlx5e_is_eswitch_flow(flow)) {
-		if (flow->attr->esw_attr->split_count && ct_flow) {
+		if (flow->attr->esw_attr->split_count && ct_flow &&
+		    !MLX5_CAP_GEN(flow->attr->esw_attr->in_mdev, reg_c_preserve)) {
 			/* All registers used by ct are cleared when using
 			 * split rules.
 			 */
@@ -3779,6 +3841,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
 				return err;
 
 			flow_flag_set(flow, CT);
+			esw_attr->split_count = esw_attr->out_count;
 			break;
 		default:
 			NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
@@ -3841,11 +3904,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
 			return -EOPNOTSUPP;
 		}
 
-		if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
-			NL_SET_ERR_MSG_MOD(extack,
-					   "Mirroring goto chain rules isn't supported");
-			return -EOPNOTSUPP;
-		}
 		attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 	}


@@ -576,7 +576,7 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
 	pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
 	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
-	prefetchw(wqe->data);
+	net_prefetchw(wqe->data);
 
 	*session = (struct mlx5e_tx_mpwqe) {
 		.wqe = wqe,


@@ -248,7 +248,7 @@ err_mod_hdr_regc0:
 err_ethertype:
 	kfree(rule);
 out:
-	kfree(rule_spec);
+	kvfree(rule_spec);
 	return err;
 }
@@ -328,7 +328,7 @@ static int mlx5_create_indir_recirc_group(struct mlx5_eswitch *esw,
 	e->recirc_cnt = 0;
 
 out:
-	kfree(in);
+	kvfree(in);
 	return err;
 }
@@ -347,7 +347,7 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 	if (!spec) {
-		kfree(in);
+		kvfree(in);
 		return -ENOMEM;
 	}
@@ -371,8 +371,8 @@
 	}
 
 err_out:
-	kfree(spec);
-	kfree(in);
+	kvfree(spec);
+	kvfree(in);
 	return err;
 }
}


@@ -768,7 +768,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
 	spin_lock(&lag_lock);
 	ldev = mlx5_lag_dev_get(dev);
-	if (ldev && __mlx5_lag_is_roce(ldev)) {
+	if (ldev && __mlx5_lag_is_active(ldev)) {
 		num_ports = MLX5_MAX_PORTS;
 		mdev[MLX5_LAG_P1] = ldev->pf[MLX5_LAG_P1].dev;
 		mdev[MLX5_LAG_P2] = ldev->pf[MLX5_LAG_P2].dev;


@@ -492,7 +492,7 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data)
 		break;
 	default:
 		break;
-	};
+	}
 
 	return 0;
 }


@@ -331,7 +331,7 @@ static void dr_ste_v0_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
 	MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
 		 DR_STE_ACTION_TYPE_PUSH_VLAN);
 	MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
-	/* Due to HW limitation we need to set this bit, otherwise reforamt +
+	/* Due to HW limitation we need to set this bit, otherwise reformat +
 	 * push vlan will not work.
 	 */
 	if (go_back)


@@ -437,21 +437,6 @@ static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
 	dr_ste_v1_set_reparse(hw_ste_p);
 }
 
-static void dr_ste_v1_set_rx_decap_l3(u8 *hw_ste_p,
-				      u8 *s_action,
-				      u16 decap_actions,
-				      u32 decap_index)
-{
-	MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id,
-		 DR_STE_V1_ACTION_ID_MODIFY_LIST);
-	MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions,
-		 decap_actions);
-	MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr,
-		 decap_index);
-	dr_ste_v1_set_reparse(hw_ste_p);
-}
-
 static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
 					  u8 *s_action,
 					  u16 num_of_actions,
@@ -571,9 +556,6 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
 	bool allow_ctr = true;
 
 	if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
-		dr_ste_v1_set_rx_decap_l3(last_ste, action,
-					  attr->decap_actions,
-					  attr->decap_index);
 		dr_ste_v1_set_rewrite_actions(last_ste, action,
 					      attr->decap_actions,
 					      attr->decap_index);
@@ -1532,6 +1514,7 @@ static void dr_ste_v1_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
 	DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_gvmi, misc_mask, source_port);
 	DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_qp, misc_mask, source_sqn);
+	misc_mask->source_eswitch_owner_vhca_id = 0;
 }
 
 static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,


@@ -1142,6 +1142,8 @@ enum mlx5_flex_parser_protos {
 	MLX5_FLEX_PROTO_GENEVE        = 1 << 3,
 	MLX5_FLEX_PROTO_CW_MPLS_GRE   = 1 << 4,
 	MLX5_FLEX_PROTO_CW_MPLS_UDP   = 1 << 5,
+	MLX5_FLEX_PROTO_ICMP          = 1 << 8,
+	MLX5_FLEX_PROTO_ICMPV6        = 1 << 9,
 };
 
 /* MLX5 DEV CAPs */