Merge tag 'mlx5-updates-2018-05-17' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

mlx5-updates-2018-05-17

mlx5 core driver updates for both the net-next and rdma-next branches.

From Christophe JAILLET, the first three patches use kvfree where needed.

From: Or Gerlitz <ogerlitz@mellanox.com>

The next six patches from Roi and co. add support for the merged
SRIOV e-switch, which serves cases where both PFs, the VFs set
on them, and both uplinks are used in a single v-switch SW model.
When merged e-switch is supported, the per-port e-switches are logically
merged into one e-switch that spans both physical ports and all the VFs.

This model allows offloading TC eswitch rules between VFs belonging
to different PFs (and hence having different eswitch affinity); it also
lays some of the foundations needed for uplink LAG support.

Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
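
For illustration only (not part of this series): a minimal standalone C sketch of the merged e-switch destination model, using simplified stand-in types rather than the kernel's mlx5_flow_destination. A forward-to-vport destination now carries the vhca_id of the eswitch that owns the vport, and that id is attached only when the device reports the merged_eswitch capability:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the new vport destination layout added to
 * include/linux/mlx5/fs.h below: vport number plus an optional
 * owning-eswitch vhca_id. */
struct vport_dest {
        uint16_t num;           /* destination vport number */
        uint16_t vhca_id;       /* vhca_id of the eswitch owning the vport */
        bool     vhca_id_valid; /* set only on merged-eswitch devices */
};

/* Fill a forward-to-vport destination; the vhca_id is attached only when
 * the merged_eswitch capability is present, mirroring the
 * MLX5_CAP_ESW(esw->dev, merged_eswitch) check in the offloads hunk below. */
static void set_vport_dest(struct vport_dest *dest, uint16_t vport,
                           uint16_t owner_vhca_id, bool merged_eswitch)
{
        dest->num = vport;
        dest->vhca_id_valid = false;
        if (merged_eswitch) {
                dest->vhca_id = owner_vhca_id;
                dest->vhca_id_valid = true;
        }
}

int main(void)
{
        struct vport_dest dest;

        set_vport_dest(&dest, 2, 0x11, true);
        printf("vport=%u vhca_id=%u valid=%d\n",
               (unsigned int)dest.num, (unsigned int)dest.vhca_id,
               (int)dest.vhca_id_valid);
        return 0;
}

In the driver itself this corresponds to filling dest[i].vport.num, vport.vhca_id and vport.vhca_id_valid in mlx5_eswitch_add_offloaded_rule and passing the id to firmware through dest_format_struct, as the hunks below do.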
Saeed Mahameed 2018-05-17 17:47:09 -07:00
commit 260ab7042e
11 changed files with 62 additions and 17 deletions

View File

@@ -849,7 +849,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
         return 0;
 err_cqb:
-        kfree(*cqb);
+        kvfree(*cqb);
 err_db:
         mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);

View File

@@ -235,7 +235,7 @@ const char *parse_fs_dst(struct trace_seq *p,
         switch (dst->type) {
         case MLX5_FLOW_DESTINATION_TYPE_VPORT:
-                trace_seq_printf(p, "vport=%u\n", dst->vport_num);
+                trace_seq_printf(p, "vport=%u\n", dst->vport.num);
                 break;
         case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
                 trace_seq_printf(p, "ft=%p\n", dst->ft);

View File

@@ -839,6 +839,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                 out_priv = netdev_priv(encap_dev);
                 rpriv = out_priv->ppriv;
                 attr->out_rep = rpriv->rep;
+                attr->out_mdev = out_priv->mdev;
         }
         err = mlx5_eswitch_add_vlan_action(esw, attr);
@@ -2497,6 +2498,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                 return -EINVAL;
         attr->in_rep = rpriv->rep;
+        attr->in_mdev = priv->mdev;
         tcf_exts_to_list(exts, &actions);
         list_for_each_entry(a, &actions, list) {
@@ -2539,6 +2541,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                         out_priv = netdev_priv(out_dev);
                         rpriv = out_priv->ppriv;
                         attr->out_rep = rpriv->rep;
+                        attr->out_mdev = out_priv->mdev;
                 } else if (encap) {
                         parse_attr->mirred_ifindex = out_dev->ifindex;
                         parse_attr->tun_info = *info;

View File

@@ -192,7 +192,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
         }
         dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
-        dest.vport_num = vport;
+        dest.vport.num = vport;
         esw_debug(esw->dev,
                   "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",

View File

@@ -237,6 +237,8 @@ enum mlx5_flow_match_level {
 struct mlx5_esw_flow_attr {
         struct mlx5_eswitch_rep *in_rep;
         struct mlx5_eswitch_rep *out_rep;
+        struct mlx5_core_dev *out_mdev;
+        struct mlx5_core_dev *in_mdev;
         int action;
         __be16 vlan_proto;

View File

@@ -71,7 +71,12 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
         if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
-                dest[i].vport_num = attr->out_rep->vport;
+                dest[i].vport.num = attr->out_rep->vport;
+                if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
+                        dest[i].vport.vhca_id =
+                                MLX5_CAP_GEN(attr->out_mdev, vhca_id);
+                        dest[i].vport.vhca_id_valid = 1;
+                }
                 i++;
         }
         if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
@@ -88,8 +93,16 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
         misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
         MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
+        if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+                MLX5_SET(fte_match_set_misc, misc,
+                         source_eswitch_owner_vhca_id,
+                         MLX5_CAP_GEN(attr->in_mdev, vhca_id));
         misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
         MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+        if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+                MLX5_SET_TO_ONES(fte_match_set_misc, misc,
+                                 source_eswitch_owner_vhca_id);
         if (attr->match_level == MLX5_MATCH_NONE)
                 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
@@ -347,7 +360,7 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn
         spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
         dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
-        dest.vport_num = vport;
+        dest.vport.num = vport;
         flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
         flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
@@ -391,7 +404,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
         dmac_c[0] = 0x01;
         dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
-        dest.vport_num = 0;
+        dest.vport.num = 0;
         flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
         flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
@@ -667,7 +680,7 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
         esw->offloads.vport_rx_group = g;
 out:
-        kfree(flow_group_in);
+        kvfree(flow_group_in);
         return err;
 }

View File

@@ -372,6 +372,15 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
         if (dst->dest_attr.type ==
             MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
                 id = dst->dest_attr.ft->id;
+        } else if (dst->dest_attr.type ==
+                   MLX5_FLOW_DESTINATION_TYPE_VPORT) {
+                id = dst->dest_attr.vport.num;
+                MLX5_SET(dest_format_struct, in_dests,
+                         destination_eswitch_owner_vhca_id_valid,
+                         dst->dest_attr.vport.vhca_id_valid);
+                MLX5_SET(dest_format_struct, in_dests,
+                         destination_eswitch_owner_vhca_id,
+                         dst->dest_attr.vport.vhca_id);
         } else {
                 id = dst->dest_attr.tir_num;
         }

View File

@@ -1374,6 +1374,8 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
         struct mlx5_core_dev *dev = get_dev(&ft->node);
         int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
         void *match_criteria_addr;
+        u8 src_esw_owner_mask_on;
+        void *misc;
         int err;
         u32 *in;
@@ -1386,6 +1388,14 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
         MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
         MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index +
                  fg->max_ftes - 1);
+        misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
+                            misc_parameters);
+        src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
+                                           source_eswitch_owner_vhca_id);
+        MLX5_SET(create_flow_group_in, in,
+                 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);
         match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
                                            in, match_criteria);
         memcpy(match_criteria_addr, fg->mask.match_criteria,
@@ -1406,7 +1416,7 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
 {
         if (d1->type == d2->type) {
                 if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
-                     d1->vport_num == d2->vport_num) ||
+                     d1->vport.num == d2->vport.num) ||
                     (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
                      d1->ft == d2->ft) ||
                     (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&

View File

@@ -511,7 +511,7 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
         *system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
                                         nic_vport_context.system_image_guid);
-        kfree(out);
+        kvfree(out);
         return 0;
 }
@@ -531,7 +531,7 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
         *node_guid = MLX5_GET64(query_nic_vport_context_out, out,
                                 nic_vport_context.node_guid);
-        kfree(out);
+        kvfree(out);
         return 0;
 }
@@ -587,7 +587,7 @@ int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
         *qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
                                    nic_vport_context.qkey_violation_counter);
-        kfree(out);
+        kvfree(out);
         return 0;
 }

View File

@@ -90,8 +90,12 @@ struct mlx5_flow_destination {
         union {
                 u32 tir_num;
                 struct mlx5_flow_table *ft;
-                u32 vport_num;
                 struct mlx5_fc *counter;
+                struct {
+                        u16 num;
+                        u16 vhca_id;
+                        bool vhca_id_valid;
+                } vport;
         };
 };

View File

@@ -396,7 +396,7 @@ struct mlx5_ifc_fte_match_set_misc_bits {
         u8 reserved_at_0[0x8];
         u8 source_sqn[0x18];
-        u8 reserved_at_20[0x10];
+        u8 source_eswitch_owner_vhca_id[0x10];
         u8 source_port[0x10];
         u8 outer_second_prio[0x3];
@@ -541,7 +541,8 @@ struct mlx5_ifc_e_switch_cap_bits {
         u8 vport_svlan_insert[0x1];
         u8 vport_cvlan_insert_if_not_exist[0x1];
         u8 vport_cvlan_insert_overwrite[0x1];
-        u8 reserved_at_5[0x19];
+        u8 reserved_at_5[0x18];
+        u8 merged_eswitch[0x1];
         u8 nic_vport_node_guid_modify[0x1];
         u8 nic_vport_port_guid_modify[0x1];
@@ -1131,8 +1132,9 @@ enum mlx5_flow_destination_type {
 struct mlx5_ifc_dest_format_struct_bits {
         u8 destination_type[0x8];
         u8 destination_id[0x18];
-        u8 reserved_at_20[0x20];
+        u8 destination_eswitch_owner_vhca_id_valid[0x1];
+        u8 reserved_at_21[0xf];
+        u8 destination_eswitch_owner_vhca_id[0x10];
 };
 struct mlx5_ifc_flow_counter_list_bits {
@@ -6977,7 +6979,9 @@ struct mlx5_ifc_create_flow_group_in_bits {
         u8 reserved_at_a0[0x8];
         u8 table_id[0x18];
-        u8 reserved_at_c0[0x20];
+        u8 source_eswitch_owner_vhca_id_valid[0x1];
+        u8 reserved_at_c1[0x1f];
         u8 start_flow_index[0x20];