commit 0c41284c83

Merge tag 'shared-for-4.9-2' of git://git.kernel.org/pub/scm/linux/kernel/git/leon/linux-rdma into mlx5-shared

Mellanox ConnectX-4/Connect-IB shared code (SW part)

* net/mlx5: Add sniffer namespaces
* net/mlx5: Introduce sniffer steering hardware capabilities
* net/mlx5: Configure IB devices according to LAG state
* net/mlx5: Vport LAG creation support
* net/mlx5: Add LAG flow steering namespace
* net/mlx5: LAG demux flow table support
* net/mlx5: LAG and SRIOV cannot be used together
* net/mlx5e: Avoid port remapping of mlx5e netdev TISes
* net/mlx5: Get RoCE netdev
* net/mlx5: Implement RoCE LAG feature
* net/mlx5: Add HW interfaces used by LAG
* net/mlx5: Separate query_port_proto_oper for IB and EN
* net/mlx5: Expose mlx5e_link_mode
* net/mlx5: Update struct mlx5_ifc_xrqc_bits
* net/mlx5: Modify RQ bitmask from mlx5 ifc
@@ -748,8 +748,7 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
 			     &props->active_width);
 	if (err)
 		goto out;
-	err = mlx5_query_port_proto_oper(mdev, &props->active_speed, MLX5_PTYS_IB,
-					 port);
+	err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
 	if (err)
 		goto out;
@@ -3,7 +3,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
 
 mlx5_core-y :=	main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
 		health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
 		mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
-		fs_counters.o rl.o
+		fs_counters.o rl.o lag.o
 
 mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \
 		en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \
@@ -285,6 +285,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
 	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
 	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
+	case MLX5_CMD_OP_DESTROY_LAG:
+	case MLX5_CMD_OP_DESTROY_VPORT_LAG:
 	case MLX5_CMD_OP_DESTROY_TIR:
 	case MLX5_CMD_OP_DESTROY_SQ:
 	case MLX5_CMD_OP_DESTROY_RQ:
@@ -376,6 +378,10 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
 	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
 	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+	case MLX5_CMD_OP_CREATE_LAG:
+	case MLX5_CMD_OP_MODIFY_LAG:
+	case MLX5_CMD_OP_QUERY_LAG:
+	case MLX5_CMD_OP_CREATE_VPORT_LAG:
 	case MLX5_CMD_OP_CREATE_TIR:
 	case MLX5_CMD_OP_MODIFY_TIR:
 	case MLX5_CMD_OP_QUERY_TIR:
@@ -514,6 +520,12 @@ const char *mlx5_command_str(int command)
 	MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
 	MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
 	MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
+	MLX5_COMMAND_STR_CASE(CREATE_LAG);
+	MLX5_COMMAND_STR_CASE(MODIFY_LAG);
+	MLX5_COMMAND_STR_CASE(QUERY_LAG);
+	MLX5_COMMAND_STR_CASE(DESTROY_LAG);
+	MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG);
+	MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG);
 	MLX5_COMMAND_STR_CASE(CREATE_TIR);
 	MLX5_COMMAND_STR_CASE(MODIFY_TIR);
 	MLX5_COMMAND_STR_CASE(DESTROY_TIR);
@@ -651,40 +651,6 @@ struct mlx5e_priv {
 	void *ppriv;
 };
 
-enum mlx5e_link_mode {
-	MLX5E_1000BASE_CX_SGMII	 = 0,
-	MLX5E_1000BASE_KX	 = 1,
-	MLX5E_10GBASE_CX4	 = 2,
-	MLX5E_10GBASE_KX4	 = 3,
-	MLX5E_10GBASE_KR	 = 4,
-	MLX5E_20GBASE_KR2	 = 5,
-	MLX5E_40GBASE_CR4	 = 6,
-	MLX5E_40GBASE_KR4	 = 7,
-	MLX5E_56GBASE_R4	 = 8,
-	MLX5E_10GBASE_CR	 = 12,
-	MLX5E_10GBASE_SR	 = 13,
-	MLX5E_10GBASE_ER	 = 14,
-	MLX5E_40GBASE_SR4	 = 15,
-	MLX5E_40GBASE_LR4	 = 16,
-	MLX5E_50GBASE_SR2	 = 18,
-	MLX5E_100GBASE_CR4	 = 20,
-	MLX5E_100GBASE_SR4	 = 21,
-	MLX5E_100GBASE_KR4	 = 22,
-	MLX5E_100GBASE_LR4	 = 23,
-	MLX5E_100BASE_TX	 = 24,
-	MLX5E_1000BASE_T	 = 25,
-	MLX5E_10GBASE_T		 = 26,
-	MLX5E_25GBASE_CR	 = 27,
-	MLX5E_25GBASE_KR	 = 28,
-	MLX5E_25GBASE_SR	 = 29,
-	MLX5E_50GBASE_CR2	 = 30,
-	MLX5E_50GBASE_KR2	 = 31,
-	MLX5E_LINK_MODES_NUMBER,
-};
-
-#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
-
 void mlx5e_build_ptys2ethtool_map(void);
 
 void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
@@ -489,7 +489,8 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
 	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
 
 	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
-	MLX5_SET64(modify_rq_in, in, modify_bitmask, MLX5_RQ_BITMASK_VSD);
+	MLX5_SET64(modify_rq_in, in, modify_bitmask,
+		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
 	MLX5_SET(rqc, rqc, vsd, vsd);
 	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
 
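The hunk above swaps a driver-local bitmask for the one now derived from the mlx5_ifc layout: modify_bitmask is a select mask that tells firmware which RQ context fields the command actually changes. A minimal standalone sketch of that idiom follows; the struct and field names are simplified stand-ins, not the real firmware command layout.

/* Standalone sketch of the modify-bitmask idiom; compile with any C compiler. */
#include <stdint.h>
#include <stdio.h>

#define MODIFY_BITMASK_VSD            (1ULL << 1) /* mirrors MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD */
#define MODIFY_BITMASK_COUNTER_SET_ID (1ULL << 3)

struct modify_rq_in {             /* toy stand-in for the FW command */
	uint64_t modify_bitmask;  /* which context fields FW should apply */
	unsigned vsd;             /* vlan-strip-disable value */
};

int main(void)
{
	struct modify_rq_in in = {0};

	/* Only fields whose bit is set in modify_bitmask take effect. */
	in.modify_bitmask |= MODIFY_BITMASK_VSD;
	in.vsd = 1;

	printf("bitmask=%#llx vsd=%u\n",
	       (unsigned long long)in.modify_bitmask, in.vsd);
	return 0;
}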
@@ -2024,6 +2025,10 @@ static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
 
 	MLX5_SET(tisc, tisc, prio, tc << 1);
 	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
 
+	if (mlx5_lag_is_lacp_owner(mdev))
+		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
+
 	return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
 }
 
@@ -3368,6 +3373,8 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
 	struct mlx5_eswitch_rep rep;
 
+	mlx5_lag_add(mdev, netdev);
+
 	if (mlx5e_vxlan_allowed(mdev)) {
 		rtnl_lock();
 		udp_tunnel_get_rx_info(netdev);
@@ -3390,6 +3397,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
 {
 	queue_work(priv->wq, &priv->set_rx_mode_work);
 	mlx5e_disable_async_events(priv);
+	mlx5_lag_remove(priv->mdev);
 }
 
 static const struct mlx5e_profile mlx5e_nic_profile = {
@@ -58,6 +58,7 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
 
 int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
 			       u16 vport,
+			       enum fs_flow_table_op_mod op_mod,
 			       enum fs_flow_table_type type, unsigned int level,
 			       unsigned int log_size, struct mlx5_flow_table
 			       *next_ft, unsigned int *table_id)
@@ -69,10 +70,6 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
 	MLX5_SET(create_flow_table_in, in, opcode,
 		 MLX5_CMD_OP_CREATE_FLOW_TABLE);
 
-	if (next_ft) {
-		MLX5_SET(create_flow_table_in, in, table_miss_mode, 1);
-		MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id);
-	}
 	MLX5_SET(create_flow_table_in, in, table_type, type);
 	MLX5_SET(create_flow_table_in, in, level, level);
 	MLX5_SET(create_flow_table_in, in, log_size, log_size);
@@ -81,6 +78,22 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
 		MLX5_SET(create_flow_table_in, in, other_vport, 1);
 	}
 
+	switch (op_mod) {
+	case FS_FT_OP_MOD_NORMAL:
+		if (next_ft) {
+			MLX5_SET(create_flow_table_in, in, table_miss_mode, 1);
+			MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id);
+		}
+		break;
+
+	case FS_FT_OP_MOD_LAG_DEMUX:
+		MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
+		if (next_ft)
+			MLX5_SET(create_flow_table_in, in, lag_master_next_table_id,
+				 next_ft->id);
+		break;
+	}
+
 	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 	if (!err)
 		*table_id = MLX5_GET(create_flow_table_out, out,
@@ -117,17 +130,32 @@ int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
 		 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
 	MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
 	MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
-	if (ft->vport) {
-		MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
-		MLX5_SET(modify_flow_table_in, in, other_vport, 1);
-	}
-	MLX5_SET(modify_flow_table_in, in, modify_field_select,
-		 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
-	if (next_ft) {
-		MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1);
-		MLX5_SET(modify_flow_table_in, in, table_miss_id, next_ft->id);
+
+	if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
+		MLX5_SET(modify_flow_table_in, in, modify_field_select,
+			 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
+		if (next_ft) {
+			MLX5_SET(modify_flow_table_in, in,
+				 lag_master_next_table_id, next_ft->id);
+		} else {
+			MLX5_SET(modify_flow_table_in, in,
+				 lag_master_next_table_id, 0);
+		}
 	} else {
-		MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0);
+		if (ft->vport) {
+			MLX5_SET(modify_flow_table_in, in, vport_number,
+				 ft->vport);
+			MLX5_SET(modify_flow_table_in, in, other_vport, 1);
+		}
+		MLX5_SET(modify_flow_table_in, in, modify_field_select,
+			 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
+		if (next_ft) {
+			MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1);
+			MLX5_SET(modify_flow_table_in, in, table_miss_id,
+				 next_ft->id);
+		} else {
+			MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0);
+		}
 	}
 
 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
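The modify path above dispatches on the table's op_mod: LAG demux tables chain through lag_master_next_table_id under field-select bit 15, while every other table type chains through the miss table under bit 0. A minimal standalone model of that dispatch, with illustrative names rather than the kernel's types:

/* Standalone model of the field-select dispatch; compile with any C compiler. */
#include <stdio.h>

#define SEL_MISS_TABLE_ID     (1UL << 0)  /* MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID */
#define SEL_LAG_NEXT_TABLE_ID (1UL << 15) /* MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID */

enum op_mod { OP_MOD_NORMAL, OP_MOD_LAG_DEMUX };

static unsigned long pick_field_select(enum op_mod op_mod)
{
	/* LAG demux tables advertise the "next LAG table" field to FW;
	 * normal tables advertise the miss-table field instead. */
	return op_mod == OP_MOD_LAG_DEMUX ? SEL_LAG_NEXT_TABLE_ID
					  : SEL_MISS_TABLE_ID;
}

int main(void)
{
	printf("normal:    %#lx\n", pick_field_select(OP_MOD_NORMAL));
	printf("lag demux: %#lx\n", pick_field_select(OP_MOD_LAG_DEMUX));
	return 0;
}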
@@ -35,6 +35,7 @@
 
 int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
 			       u16 vport,
+			       enum fs_flow_table_op_mod op_mod,
 			       enum fs_flow_table_type type, unsigned int level,
 			       unsigned int log_size, struct mlx5_flow_table
 			       *next_ft, unsigned int *table_id);
@@ -96,6 +96,10 @@
 #define OFFLOADS_NUM_PRIOS 1
 #define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)
 
+#define LAG_PRIO_NUM_LEVELS 1
+#define LAG_NUM_PRIOS 1
+#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
+
 struct node_caps {
 	size_t	arr_sz;
 	long	*caps;
@@ -111,12 +115,16 @@ static struct init_tree_node {
 	int num_levels;
 } root_fs = {
 	.type = FS_TYPE_NAMESPACE,
-	.ar_size = 6,
+	.ar_size = 7,
 	.children = (struct init_tree_node[]) {
 		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
 			 FS_CHAINING_CAPS,
 			 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
 						  BY_PASS_PRIO_NUM_LEVELS))),
+		ADD_PRIO(0, LAG_MIN_LEVEL, 0,
+			 FS_CHAINING_CAPS,
+			 ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
+						  LAG_PRIO_NUM_LEVELS))),
 		ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
 			 ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
 		ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
@@ -477,7 +485,8 @@ static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
 }
 
 static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
-						enum fs_flow_table_type table_type)
+						enum fs_flow_table_type table_type,
+						enum fs_flow_table_op_mod op_mod)
 {
 	struct mlx5_flow_table *ft;
 
@@ -487,6 +496,7 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
 
 	ft->level = level;
 	ft->node.type = FS_TYPE_FLOW_TABLE;
+	ft->op_mod = op_mod;
 	ft->type = table_type;
 	ft->vport = vport;
 	ft->max_fte = max_fte;
@@ -724,6 +734,7 @@ static void list_add_flow_table(struct mlx5_flow_table *ft,
 }
 
 static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+							enum fs_flow_table_op_mod op_mod,
 							u16 vport, int prio,
 							int max_fte, u32 level)
 {
@@ -756,18 +767,19 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
 	level += fs_prio->start_level;
 	ft = alloc_flow_table(level,
 			      vport,
-			      roundup_pow_of_two(max_fte),
-			      root->table_type);
+			      max_fte ? roundup_pow_of_two(max_fte) : 0,
+			      root->table_type,
+			      op_mod);
 	if (!ft) {
 		err = -ENOMEM;
 		goto unlock_root;
 	}
 
 	tree_init_node(&ft->node, 1, del_flow_table);
-	log_table_sz = ilog2(ft->max_fte);
+	log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
 	next_ft = find_next_chained_ft(fs_prio);
-	err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->type, ft->level,
-					 log_table_sz, next_ft, &ft->id);
+	err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type,
+					 ft->level, log_table_sz, next_ft, &ft->id);
 	if (err)
 		goto free_ft;
 
@@ -794,16 +806,27 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
 					       int prio, int max_fte,
 					       u32 level)
 {
-	return __mlx5_create_flow_table(ns, 0, prio, max_fte, level);
+	return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, 0, prio,
+					max_fte, level);
 }
 
 struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
 						     int prio, int max_fte,
 						     u32 level, u16 vport)
 {
-	return __mlx5_create_flow_table(ns, vport, prio, max_fte, level);
+	return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, vport, prio,
+					max_fte, level);
 }
 
+struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
+					       struct mlx5_flow_namespace *ns,
+					       int prio, u32 level)
+{
+	return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_LAG_DEMUX, 0, prio, 0,
+					level);
+}
+EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
+
 struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
 							    int prio,
 							    int num_flow_table_entries,
@@ -1381,6 +1404,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 
 	switch (type) {
 	case MLX5_FLOW_NAMESPACE_BYPASS:
+	case MLX5_FLOW_NAMESPACE_LAG:
 	case MLX5_FLOW_NAMESPACE_OFFLOADS:
 	case MLX5_FLOW_NAMESPACE_ETHTOOL:
 	case MLX5_FLOW_NAMESPACE_KERNEL:
@@ -1403,6 +1427,16 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 			return &steering->esw_ingress_root_ns->ns;
 		else
 			return NULL;
+	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
+		if (steering->sniffer_rx_root_ns)
+			return &steering->sniffer_rx_root_ns->ns;
+		else
+			return NULL;
+	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
+		if (steering->sniffer_tx_root_ns)
+			return &steering->sniffer_tx_root_ns->ns;
+		else
+			return NULL;
 	default:
 		return NULL;
 	}
@@ -1702,10 +1736,46 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
 	cleanup_root_ns(steering->esw_egress_root_ns);
 	cleanup_root_ns(steering->esw_ingress_root_ns);
 	cleanup_root_ns(steering->fdb_root_ns);
+	cleanup_root_ns(steering->sniffer_rx_root_ns);
+	cleanup_root_ns(steering->sniffer_tx_root_ns);
 	mlx5_cleanup_fc_stats(dev);
 	kfree(steering);
 }
 
+static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
+{
+	struct fs_prio *prio;
+
+	steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
+	if (!steering->sniffer_tx_root_ns)
+		return -ENOMEM;
+
+	/* Create single prio */
+	prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
+	if (IS_ERR(prio)) {
+		cleanup_root_ns(steering->sniffer_tx_root_ns);
+		return PTR_ERR(prio);
+	}
+	return 0;
+}
+
+static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
+{
+	struct fs_prio *prio;
+
+	steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
+	if (!steering->sniffer_rx_root_ns)
+		return -ENOMEM;
+
+	/* Create single prio */
+	prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
+	if (IS_ERR(prio)) {
+		cleanup_root_ns(steering->sniffer_rx_root_ns);
+		return PTR_ERR(prio);
+	}
+	return 0;
+}
+
 static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
 {
 	struct fs_prio *prio;
@@ -1802,6 +1872,18 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
 		}
 	}
 
+	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
+		err = init_sniffer_rx_root_ns(steering);
+		if (err)
+			goto err;
+	}
+
+	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
+		err = init_sniffer_tx_root_ns(steering);
+		if (err)
+			goto err;
+	}
+
 	return 0;
 err:
 	mlx5_cleanup_fs(dev);
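With the sniffer root namespaces registered above, a consumer can reach them through the ordinary namespace lookup and table-creation path. A hedged kernel-context sketch (not standalone; error handling abbreviated, function name hypothetical), using only APIs that appear in this diff:

/* Kernel-context sketch: reaching the new sniffer RX namespace. */
static struct mlx5_flow_table *sniffer_rx_table(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_namespace *ns;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_SNIFFER_RX);
	if (!ns)
		return ERR_PTR(-EOPNOTSUPP); /* FW lacks sniffer ft_support */

	/* prio 0, one entry, level 0 -- the single prio created above */
	return mlx5_create_flow_table(ns, 0, 1, 0);
}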
@@ -49,6 +49,13 @@ enum fs_flow_table_type {
 	FS_FT_ESW_EGRESS_ACL  = 0x2,
 	FS_FT_ESW_INGRESS_ACL = 0x3,
 	FS_FT_FDB             = 0X4,
+	FS_FT_SNIFFER_RX      = 0X5,
+	FS_FT_SNIFFER_TX      = 0X6,
+};
+
+enum fs_flow_table_op_mod {
+	FS_FT_OP_MOD_NORMAL,
+	FS_FT_OP_MOD_LAG_DEMUX,
 };
 
 enum fs_fte_status {
@@ -61,6 +68,8 @@ struct mlx5_flow_steering {
 	struct mlx5_flow_root_namespace *fdb_root_ns;
 	struct mlx5_flow_root_namespace *esw_egress_root_ns;
 	struct mlx5_flow_root_namespace *esw_ingress_root_ns;
+	struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
+	struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
 };
 
 struct fs_node {
@@ -93,6 +102,7 @@ struct mlx5_flow_table {
 	unsigned int			max_fte;
 	unsigned int			level;
 	enum fs_flow_table_type		type;
+	enum fs_flow_table_op_mod	op_mod;
 	struct {
 		bool			active;
 		unsigned int		required_groups;
drivers/net/ethernet/mellanox/mlx5/core/lag.c (new file, 602 lines)
@@ -0,0 +1,602 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
+#include "mlx5_core.h"
+
+enum {
+	MLX5_LAG_FLAG_BONDED = 1 << 0,
+};
+
+struct lag_func {
+	struct mlx5_core_dev *dev;
+	struct net_device    *netdev;
+};
+
+/* Used for collection of netdev event info. */
+struct lag_tracker {
+	enum   netdev_lag_tx_type           tx_type;
+	struct netdev_lag_lower_state_info  netdev_state[MLX5_MAX_PORTS];
+	bool is_bonded;
+};
+
+/* LAG data of a ConnectX card.
+ * It serves both its phys functions.
+ */
+struct mlx5_lag {
+	u8                        flags;
+	u8                        v2p_map[MLX5_MAX_PORTS];
+	struct lag_func           pf[MLX5_MAX_PORTS];
+	struct lag_tracker        tracker;
+	struct delayed_work       bond_work;
+	struct notifier_block     nb;
+};
+
+/* General purpose, use for short periods of time.
+ * Beware of lock dependencies (preferably, no locks should be acquired
+ * under it).
+ */
+static DEFINE_MUTEX(lag_mutex);
+
+static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
+			       u8 remap_port2)
+{
+	u32   in[MLX5_ST_SZ_DW(create_lag_in)]   = {0};
+	u32   out[MLX5_ST_SZ_DW(create_lag_out)] = {0};
+	void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
+
+	MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
+
+	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
+	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1,
+			       u8 remap_port2)
+{
+	u32   in[MLX5_ST_SZ_DW(modify_lag_in)]   = {0};
+	u32   out[MLX5_ST_SZ_DW(modify_lag_out)] = {0};
+	void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);
+
+	MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
+	MLX5_SET(modify_lag_in, in, field_select, 0x1);
+
+	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
+	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_cmd_destroy_lag(struct mlx5_core_dev *dev)
+{
+	u32  in[MLX5_ST_SZ_DW(destroy_lag_in)]  = {0};
+	u32 out[MLX5_ST_SZ_DW(destroy_lag_out)] = {0};
+
+	MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev)
+{
+	u32  in[MLX5_ST_SZ_DW(create_vport_lag_in)]  = {0};
+	u32 out[MLX5_ST_SZ_DW(create_vport_lag_out)] = {0};
+
+	MLX5_SET(create_vport_lag_in, in, opcode, MLX5_CMD_OP_CREATE_VPORT_LAG);
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_cmd_create_vport_lag);
+
+int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
+{
+	u32  in[MLX5_ST_SZ_DW(destroy_vport_lag_in)]  = {0};
+	u32 out[MLX5_ST_SZ_DW(destroy_vport_lag_out)] = {0};
+
+	MLX5_SET(destroy_vport_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_VPORT_LAG);
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
+
+static struct mlx5_lag *mlx5_lag_dev_get(struct mlx5_core_dev *dev)
+{
+	return dev->priv.lag;
+}
+
+static int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
+				       struct net_device *ndev)
+{
+	int i;
+
+	for (i = 0; i < MLX5_MAX_PORTS; i++)
+		if (ldev->pf[i].netdev == ndev)
+			return i;
+
+	return -1;
+}
+
+static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
+{
+	return !!(ldev->flags & MLX5_LAG_FLAG_BONDED);
+}
+
+static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
+					   u8 *port1, u8 *port2)
+{
+	if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
+		if (tracker->netdev_state[0].tx_enabled) {
+			*port1 = 1;
+			*port2 = 1;
+		} else {
+			*port1 = 2;
+			*port2 = 2;
+		}
+	} else {
+		*port1 = 1;
+		*port2 = 2;
+		if (!tracker->netdev_state[0].link_up)
+			*port1 = 2;
+		else if (!tracker->netdev_state[1].link_up)
+			*port2 = 1;
+	}
+}
+
+static void mlx5_activate_lag(struct mlx5_lag *ldev,
+			      struct lag_tracker *tracker)
+{
+	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+	int err;
+
+	ldev->flags |= MLX5_LAG_FLAG_BONDED;
+
+	mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[0],
+				       &ldev->v2p_map[1]);
+
+	err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[0], ldev->v2p_map[1]);
+	if (err)
+		mlx5_core_err(dev0,
+			      "Failed to create LAG (%d)\n",
+			      err);
+}
+
+static void mlx5_deactivate_lag(struct mlx5_lag *ldev)
+{
+	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+	int err;
+
+	ldev->flags &= ~MLX5_LAG_FLAG_BONDED;
+
+	err = mlx5_cmd_destroy_lag(dev0);
+	if (err)
+		mlx5_core_err(dev0,
+			      "Failed to destroy LAG (%d)\n",
+			      err);
+}
+
+static void mlx5_do_bond(struct mlx5_lag *ldev)
+{
+	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+	struct mlx5_core_dev *dev1 = ldev->pf[1].dev;
+	struct lag_tracker tracker;
+	u8 v2p_port1, v2p_port2;
+	int i, err;
+
+	if (!dev0 || !dev1)
+		return;
+
+	mutex_lock(&lag_mutex);
+	tracker = ldev->tracker;
+	mutex_unlock(&lag_mutex);
+
+	if (tracker.is_bonded && !mlx5_lag_is_bonded(ldev)) {
+		if (mlx5_sriov_is_enabled(dev0) ||
+		    mlx5_sriov_is_enabled(dev1)) {
+			mlx5_core_warn(dev0, "LAG is not supported with SRIOV");
+			return;
+		}
+
+		for (i = 0; i < MLX5_MAX_PORTS; i++)
+			mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
+						    MLX5_INTERFACE_PROTOCOL_IB);
+
+		mlx5_activate_lag(ldev, &tracker);
+
+		mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
+		mlx5_nic_vport_enable_roce(dev1);
+	} else if (tracker.is_bonded && mlx5_lag_is_bonded(ldev)) {
+		mlx5_infer_tx_affinity_mapping(&tracker, &v2p_port1,
+					       &v2p_port2);
+
+		if ((v2p_port1 != ldev->v2p_map[0]) ||
+		    (v2p_port2 != ldev->v2p_map[1])) {
+			ldev->v2p_map[0] = v2p_port1;
+			ldev->v2p_map[1] = v2p_port2;
+
+			err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
+			if (err)
+				mlx5_core_err(dev0,
+					      "Failed to modify LAG (%d)\n",
+					      err);
+		}
+	} else if (!tracker.is_bonded && mlx5_lag_is_bonded(ldev)) {
+		mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
+		mlx5_nic_vport_disable_roce(dev1);
+
+		mlx5_deactivate_lag(ldev);
+
+		for (i = 0; i < MLX5_MAX_PORTS; i++)
+			if (ldev->pf[i].dev)
+				mlx5_add_dev_by_protocol(ldev->pf[i].dev,
+							 MLX5_INTERFACE_PROTOCOL_IB);
+	}
+}
+
+static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
+{
+	schedule_delayed_work(&ldev->bond_work, delay);
+}
+
+static void mlx5_do_bond_work(struct work_struct *work)
+{
+	struct delayed_work *delayed_work = to_delayed_work(work);
+	struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
+					     bond_work);
+	int status;
+
+	status = mutex_trylock(&mlx5_intf_mutex);
+	if (!status) {
+		/* 1 sec delay. */
+		mlx5_queue_bond_work(ldev, HZ);
+		return;
+	}
+
+	mlx5_do_bond(ldev);
+	mutex_unlock(&mlx5_intf_mutex);
+}
+
+static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
+					 struct lag_tracker *tracker,
+					 struct net_device *ndev,
+					 struct netdev_notifier_changeupper_info *info)
+{
+	struct net_device *upper = info->upper_dev, *ndev_tmp;
+	struct netdev_lag_upper_info *lag_upper_info;
+	bool is_bonded;
+	int bond_status = 0;
+	int num_slaves = 0;
+	int idx;
+
+	if (!netif_is_lag_master(upper))
+		return 0;
+
+	lag_upper_info = info->upper_info;
+
+	/* The event may still be of interest if the slave does not belong to
+	 * us, but is enslaved to a master which has one or more of our netdevs
+	 * as slaves (e.g., if a new slave is added to a master that bonds two
+	 * of our netdevs, we should unbond).
+	 */
+	rcu_read_lock();
+	for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
+		idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
+		if (idx > -1)
+			bond_status |= (1 << idx);
+
+		num_slaves++;
+	}
+	rcu_read_unlock();
+
+	/* None of this lagdev's netdevs are slaves of this master. */
+	if (!(bond_status & 0x3))
+		return 0;
+
+	if (lag_upper_info)
+		tracker->tx_type = lag_upper_info->tx_type;
+
+	/* Determine bonding status:
+	 * A device is considered bonded if both its physical ports are slaves
+	 * of the same lag master, and only them.
+	 * Lag mode must be activebackup or hash.
+	 */
+	is_bonded = (num_slaves == MLX5_MAX_PORTS) &&
+		    (bond_status == 0x3) &&
+		    ((tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) ||
+		     (tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH));
+
+	if (tracker->is_bonded != is_bonded) {
+		tracker->is_bonded = is_bonded;
+		return 1;
+	}
+
+	return 0;
+}
+
+static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
+					      struct lag_tracker *tracker,
+					      struct net_device *ndev,
+					      struct netdev_notifier_changelowerstate_info *info)
+{
+	struct netdev_lag_lower_state_info *lag_lower_info;
+	int idx;
+
+	if (!netif_is_lag_port(ndev))
+		return 0;
+
+	idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev);
+	if (idx == -1)
+		return 0;
+
+	/* This information is used to determine virtual to physical
+	 * port mapping.
+	 */
+	lag_lower_info = info->lower_state_info;
+	if (!lag_lower_info)
+		return 0;
+
+	tracker->netdev_state[idx] = *lag_lower_info;
+
+	return 1;
+}
+
+static int mlx5_lag_netdev_event(struct notifier_block *this,
+				 unsigned long event, void *ptr)
+{
+	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+	struct lag_tracker tracker;
+	struct mlx5_lag *ldev;
+	int changed = 0;
+
+	if (!net_eq(dev_net(ndev), &init_net))
+		return NOTIFY_DONE;
+
+	if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
+		return NOTIFY_DONE;
+
+	ldev    = container_of(this, struct mlx5_lag, nb);
+	tracker = ldev->tracker;
+
+	switch (event) {
+	case NETDEV_CHANGEUPPER:
+		changed = mlx5_handle_changeupper_event(ldev, &tracker, ndev,
+							ptr);
+		break;
+	case NETDEV_CHANGELOWERSTATE:
+		changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
+							     ndev, ptr);
+		break;
+	}
+
+	mutex_lock(&lag_mutex);
+	ldev->tracker = tracker;
+	mutex_unlock(&lag_mutex);
+
+	if (changed)
+		mlx5_queue_bond_work(ldev, 0);
+
+	return NOTIFY_DONE;
+}
+
+static struct mlx5_lag *mlx5_lag_dev_alloc(void)
+{
+	struct mlx5_lag *ldev;
+
+	ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
+	if (!ldev)
+		return NULL;
+
+	INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
+
+	return ldev;
+}
+
+static void mlx5_lag_dev_free(struct mlx5_lag *ldev)
+{
+	kfree(ldev);
+}
+
+static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
+				struct mlx5_core_dev *dev,
+				struct net_device *netdev)
+{
+	unsigned int fn = PCI_FUNC(dev->pdev->devfn);
+
+	if (fn >= MLX5_MAX_PORTS)
+		return;
+
+	mutex_lock(&lag_mutex);
+	ldev->pf[fn].dev    = dev;
+	ldev->pf[fn].netdev = netdev;
+	ldev->tracker.netdev_state[fn].link_up = 0;
+	ldev->tracker.netdev_state[fn].tx_enabled = 0;
+
+	dev->priv.lag = ldev;
+	mutex_unlock(&lag_mutex);
+}
+
+static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
+				   struct mlx5_core_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < MLX5_MAX_PORTS; i++)
+		if (ldev->pf[i].dev == dev)
+			break;
+
+	if (i == MLX5_MAX_PORTS)
+		return;
+
+	mutex_lock(&lag_mutex);
+	memset(&ldev->pf[i], 0, sizeof(*ldev->pf));
+
+	dev->priv.lag = NULL;
+	mutex_unlock(&lag_mutex);
+}
+
+static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
+{
+	return (u16)((dev->pdev->bus->number << 8) |
+		     PCI_SLOT(dev->pdev->devfn));
+}
+
+/* Must be called with intf_mutex held */
+void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
+{
+	struct mlx5_lag *ldev = NULL;
+	struct mlx5_core_dev *tmp_dev;
+	struct mlx5_priv *priv;
+	u16 pci_id;
+
+	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+	    !MLX5_CAP_GEN(dev, lag_master) ||
+	    (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS))
+		return;
+
+	pci_id = mlx5_gen_pci_id(dev);
+
+	mlx5_core_for_each_priv(priv) {
+		tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
+		if ((dev != tmp_dev) &&
+		    (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
+			ldev = tmp_dev->priv.lag;
+			break;
+		}
+	}
+
+	if (!ldev) {
+		ldev = mlx5_lag_dev_alloc();
+		if (!ldev) {
+			mlx5_core_err(dev, "Failed to alloc lag dev\n");
+			return;
+		}
+	}
+
+	mlx5_lag_dev_add_pf(ldev, dev, netdev);
+
+	if (!ldev->nb.notifier_call) {
+		ldev->nb.notifier_call = mlx5_lag_netdev_event;
+		if (register_netdevice_notifier(&ldev->nb)) {
+			ldev->nb.notifier_call = NULL;
+			mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
+		}
+	}
+}
+
+/* Must be called with intf_mutex held */
+void mlx5_lag_remove(struct mlx5_core_dev *dev)
+{
+	struct mlx5_lag *ldev;
+	int i;
+
+	ldev = mlx5_lag_dev_get(dev);
+	if (!ldev)
+		return;
+
+	if (mlx5_lag_is_bonded(ldev))
+		mlx5_deactivate_lag(ldev);
+
+	mlx5_lag_dev_remove_pf(ldev, dev);
+
+	for (i = 0; i < MLX5_MAX_PORTS; i++)
+		if (ldev->pf[i].dev)
+			break;
+
+	if (i == MLX5_MAX_PORTS) {
+		if (ldev->nb.notifier_call)
+			unregister_netdevice_notifier(&ldev->nb);
+		cancel_delayed_work_sync(&ldev->bond_work);
+		mlx5_lag_dev_free(ldev);
+	}
+}
+
+bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
+{
+	struct mlx5_lag *ldev;
+	bool res;
+
+	mutex_lock(&lag_mutex);
+	ldev = mlx5_lag_dev_get(dev);
+	res  = ldev && mlx5_lag_is_bonded(ldev);
+	mutex_unlock(&lag_mutex);
+
+	return res;
+}
+EXPORT_SYMBOL(mlx5_lag_is_active);
+
+struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
+{
+	struct net_device *ndev = NULL;
+	struct mlx5_lag *ldev;
+
+	mutex_lock(&lag_mutex);
+	ldev = mlx5_lag_dev_get(dev);
+
+	if (!(ldev && mlx5_lag_is_bonded(ldev)))
+		goto unlock;
+
+	if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
+		ndev = ldev->tracker.netdev_state[0].tx_enabled ?
+		       ldev->pf[0].netdev : ldev->pf[1].netdev;
+	} else {
+		ndev = ldev->pf[0].netdev;
+	}
+	if (ndev)
+		dev_hold(ndev);
+
+unlock:
+	mutex_unlock(&lag_mutex);
+
+	return ndev;
+}
+EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);
+
+bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev,
+						 priv);
+	struct mlx5_lag *ldev;
+
+	if (intf->protocol != MLX5_INTERFACE_PROTOCOL_IB)
+		return true;
+
+	ldev = mlx5_lag_dev_get(dev);
+	if (!ldev || !mlx5_lag_is_bonded(ldev) || ldev->pf[0].dev == dev)
+		return true;
+
+	/* If bonded, we do not add an IB device for PF1. */
+	return false;
+}
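mlx5_infer_tx_affinity_mapping above is the heart of the virtual-to-physical port remap: in active-backup mode both virtual ports follow the active port; in hash mode each keeps its own port unless its link is down. A standalone re-implementation for sanity-checking the combinations; the tracker struct here is a cut-down stand-in, not the kernel's.

/* Standalone check of the TX-affinity mapping logic; any C compiler. */
#include <stdbool.h>
#include <stdio.h>

struct tracker { bool active_backup; bool link_up[2]; bool tx_enabled0; };

static void infer(const struct tracker *t, unsigned *p1, unsigned *p2)
{
	if (t->active_backup) {            /* both map to the active port */
		*p1 = *p2 = t->tx_enabled0 ? 1 : 2;
	} else {                           /* hash mode: remap dead ports */
		*p1 = 1; *p2 = 2;
		if (!t->link_up[0])
			*p1 = 2;
		else if (!t->link_up[1])
			*p2 = 1;
	}
}

int main(void)
{
	struct tracker t = { .active_backup = false,
			     .link_up = { true, false } };
	unsigned p1, p2;

	infer(&t, &p1, &p2);
	printf("port1->%u port2->%u\n", p1, p2); /* port1->1 port2->1 */
	return 0;
}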
@@ -73,8 +73,9 @@ module_param_named(prof_sel, prof_sel, int, 0444);
 MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
 
 static LIST_HEAD(intf_list);
-static LIST_HEAD(dev_list);
-static DEFINE_MUTEX(intf_mutex);
+
+LIST_HEAD(mlx5_dev_list);
+DEFINE_MUTEX(mlx5_intf_mutex);
 
 struct mlx5_device_context {
 	struct list_head	list;
@@ -782,6 +783,9 @@ static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
 	struct mlx5_device_context *dev_ctx;
 	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
 
+	if (!mlx5_lag_intf_add(intf, priv))
+		return;
+
 	dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
 	if (!dev_ctx)
 		return;
@@ -820,11 +824,11 @@ static int mlx5_register_device(struct mlx5_core_dev *dev)
 	struct mlx5_priv *priv = &dev->priv;
 	struct mlx5_interface *intf;
 
-	mutex_lock(&intf_mutex);
-	list_add_tail(&priv->dev_list, &dev_list);
+	mutex_lock(&mlx5_intf_mutex);
+	list_add_tail(&priv->dev_list, &mlx5_dev_list);
 	list_for_each_entry(intf, &intf_list, list)
 		mlx5_add_device(intf, priv);
-	mutex_unlock(&intf_mutex);
+	mutex_unlock(&mlx5_intf_mutex);
 
 	return 0;
 }
@@ -834,11 +838,11 @@ static void mlx5_unregister_device(struct mlx5_core_dev *dev)
 	struct mlx5_priv *priv = &dev->priv;
 	struct mlx5_interface *intf;
 
-	mutex_lock(&intf_mutex);
+	mutex_lock(&mlx5_intf_mutex);
 	list_for_each_entry(intf, &intf_list, list)
 		mlx5_remove_device(intf, priv);
 	list_del(&priv->dev_list);
-	mutex_unlock(&intf_mutex);
+	mutex_unlock(&mlx5_intf_mutex);
 }
 
 int mlx5_register_interface(struct mlx5_interface *intf)
@@ -848,11 +852,11 @@ int mlx5_register_interface(struct mlx5_interface *intf)
 	if (!intf->add || !intf->remove)
 		return -EINVAL;
 
-	mutex_lock(&intf_mutex);
+	mutex_lock(&mlx5_intf_mutex);
 	list_add_tail(&intf->list, &intf_list);
-	list_for_each_entry(priv, &dev_list, dev_list)
+	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
 		mlx5_add_device(intf, priv);
-	mutex_unlock(&intf_mutex);
+	mutex_unlock(&mlx5_intf_mutex);
 
 	return 0;
 }
@@ -862,11 +866,11 @@ void mlx5_unregister_interface(struct mlx5_interface *intf)
 {
 	struct mlx5_priv *priv;
 
-	mutex_lock(&intf_mutex);
-	list_for_each_entry(priv, &dev_list, dev_list)
+	mutex_lock(&mlx5_intf_mutex);
+	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
 		mlx5_remove_device(intf, priv);
 	list_del(&intf->list);
-	mutex_unlock(&intf_mutex);
+	mutex_unlock(&mlx5_intf_mutex);
 }
 EXPORT_SYMBOL(mlx5_unregister_interface);
 
@@ -892,6 +896,30 @@ void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
 }
 EXPORT_SYMBOL(mlx5_get_protocol_dev);
 
+/* Must be called with intf_mutex held */
+void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
+{
+	struct mlx5_interface *intf;
+
+	list_for_each_entry(intf, &intf_list, list)
+		if (intf->protocol == protocol) {
+			mlx5_add_device(intf, &dev->priv);
+			break;
+		}
+}
+
+/* Must be called with intf_mutex held */
+void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
+{
+	struct mlx5_interface *intf;
+
+	list_for_each_entry(intf, &intf_list, list)
+		if (intf->protocol == protocol) {
+			mlx5_remove_device(intf, &dev->priv);
+			break;
+		}
+}
+
 static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 {
 	struct pci_dev *pdev = dev->pdev;
@@ -46,6 +46,9 @@
 
 extern int mlx5_core_debug_mask;
 
+extern struct list_head mlx5_dev_list;
+extern struct mutex mlx5_intf_mutex;
+
 #define mlx5_core_dbg(__dev, format, ...)				\
 	dev_dbg(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format,	\
 		 (__dev)->priv.name, __func__, __LINE__, current->pid,	\
@@ -70,6 +73,9 @@ do {									\
 #define mlx5_core_info(__dev, format, ...)				\
 	dev_info(&(__dev)->pdev->dev, format, ##__VA_ARGS__)
 
+#define mlx5_core_for_each_priv(__priv)				\
+	list_for_each_entry(__priv, &mlx5_dev_list, dev_list)
+
 enum {
 	MLX5_CMD_DATA, /* print command payload only */
 	MLX5_CMD_TIME, /* print command execution time */
@@ -84,6 +90,7 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
 void mlx5_enter_error_state(struct mlx5_core_dev *dev);
 void mlx5_disable_device(struct mlx5_core_dev *dev);
 int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
+bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev);
 int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
 int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
 int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
@@ -92,7 +99,27 @@ u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx);
 struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
 void mlx5_cq_tasklet_cb(unsigned long data);
 
+void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
+void mlx5_lag_remove(struct mlx5_core_dev *dev);
+
+void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
+void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
+
+bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);
+
 void mlx5e_init(void);
 void mlx5e_cleanup(void);
 
+static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
+{
+	/* LACP owner conditions:
+	 * 1) Function is physical.
+	 * 2) LAG is supported by FW.
+	 * 3) LAG is managed by driver (currently the only option).
+	 */
+	return  MLX5_CAP_GEN(dev, vport_group_manager) &&
+		   (MLX5_CAP_GEN(dev, num_lag_ports) > 1) &&
+		    MLX5_CAP_GEN(dev, lag_master);
+}
+
 #endif /* __MLX5_CORE_H__ */
@@ -175,25 +175,39 @@ int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper);
 
-int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
-			       u8 *proto_oper, int proto_mask,
-			       u8 local_port)
+int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev,
+				   u32 *proto_oper, u8 local_port)
 {
 	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
 	int err;
 
-	err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, local_port);
+	err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN,
+				   local_port);
 	if (err)
 		return err;
 
-	if (proto_mask == MLX5_PTYS_EN)
-		*proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
-	else
-		*proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper);
+	*proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(mlx5_query_port_proto_oper);
+EXPORT_SYMBOL(mlx5_query_port_eth_proto_oper);
+
+int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
+				  u8 *proto_oper, u8 local_port)
+{
+	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+	int err;
+
+	err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_IB,
+				   local_port);
+	if (err)
+		return err;
+
+	*proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper);
+
+	return 0;
+}
+EXPORT_SYMBOL(mlx5_query_port_ib_proto_oper);
 
 int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
 		       u32 proto_admin, int proto_mask)
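The split gives Ethernet callers a 32-bit operational mask they can test against the MLX5E_PROT_MASK macro now exported in port.h, while IB callers keep the narrower u8 result. A hedged kernel-context sketch (function name hypothetical, error handling abbreviated) using only symbols from this diff:

/* Kernel-context sketch: test the operational Ethernet protocol. */
static bool port_runs_100g_cr4(struct mlx5_core_dev *mdev, u8 port)
{
	u32 eth_proto_oper;

	if (mlx5_query_port_eth_proto_oper(mdev, &eth_proto_oper, port))
		return false;

	return eth_proto_oper & MLX5E_PROT_MASK(MLX5E_100GBASE_CR4);
}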
@@ -37,6 +37,13 @@
 #include "eswitch.h"
 #endif
 
+bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
+{
+	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
+
+	return !!sriov->num_vfs;
+}
+
 static void enable_vfs(struct mlx5_core_dev *dev, int num_vfs)
 {
 	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
@@ -144,6 +151,11 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
 	if (!mlx5_core_is_pf(dev))
 		return -EPERM;
 
+	if (num_vfs && mlx5_lag_is_active(dev)) {
+		mlx5_core_warn(dev, "can't turn sriov on while LAG is active");
+		return -EINVAL;
+	}
+
 	mlx5_core_cleanup_vfs(dev);
 
 	if (!num_vfs) {
@@ -964,6 +964,18 @@ enum mlx5_cap_type {
 #define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
 	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
 
+#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
+	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
+	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
+	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
+	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)
+
 #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
 	MLX5_GET(flow_table_eswitch_cap, \
 		 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
@@ -48,10 +48,6 @@
 #include <linux/mlx5/doorbell.h>
 #include <linux/mlx5/srq.h>
 
-enum {
-	MLX5_RQ_BITMASK_VSD = 1 << 1,
-};
-
 enum {
 	MLX5_BOARD_ID_LEN = 64,
 	MLX5_MAX_NAME_LEN = 16,
@@ -481,6 +477,7 @@ struct mlx5_fc_stats {
 };
 
 struct mlx5_eswitch;
+struct mlx5_lag;
 
 struct mlx5_rl_entry {
 	u32                     rate;
@@ -554,6 +551,7 @@ struct mlx5_priv {
 	struct mlx5_flow_steering *steering;
 	struct mlx5_eswitch     *eswitch;
 	struct mlx5_core_sriov	sriov;
+	struct mlx5_lag		*lag;
 	unsigned long		pci_dev_data;
 	struct mlx5_fc_stats   fc_stats;
 	struct mlx5_rl_table   rl_table;
@@ -946,6 +944,11 @@ int mlx5_register_interface(struct mlx5_interface *intf);
 void mlx5_unregister_interface(struct mlx5_interface *intf);
 int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
 
+int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
+int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
+struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
+
 struct mlx5_profile {
 	u64	mask;
 	u8	log_max_qp;
@@ -54,6 +54,7 @@ static inline void build_leftovers_ft_param(int *priority,
 
 enum mlx5_flow_namespace_type {
 	MLX5_FLOW_NAMESPACE_BYPASS,
+	MLX5_FLOW_NAMESPACE_LAG,
 	MLX5_FLOW_NAMESPACE_OFFLOADS,
 	MLX5_FLOW_NAMESPACE_ETHTOOL,
 	MLX5_FLOW_NAMESPACE_KERNEL,
@@ -62,6 +63,8 @@ enum mlx5_flow_namespace_type {
 	MLX5_FLOW_NAMESPACE_FDB,
 	MLX5_FLOW_NAMESPACE_ESW_EGRESS,
 	MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+	MLX5_FLOW_NAMESPACE_SNIFFER_RX,
+	MLX5_FLOW_NAMESPACE_SNIFFER_TX,
 };
 
 struct mlx5_flow_table;
@@ -106,6 +109,9 @@ mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
 			     int prio,
 			     int num_flow_table_entries,
 			     u32 level, u16 vport);
+struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
+					       struct mlx5_flow_namespace *ns,
+					       int prio, u32 level);
 int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
 
 /* inbox should be set with the following values:
@@ -174,6 +174,12 @@ enum {
 	MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY         = 0x82b,
 	MLX5_CMD_OP_SET_WOL_ROL                   = 0x830,
 	MLX5_CMD_OP_QUERY_WOL_ROL                 = 0x831,
+	MLX5_CMD_OP_CREATE_LAG                    = 0x840,
+	MLX5_CMD_OP_MODIFY_LAG                    = 0x841,
+	MLX5_CMD_OP_QUERY_LAG                     = 0x842,
+	MLX5_CMD_OP_DESTROY_LAG                   = 0x843,
+	MLX5_CMD_OP_CREATE_VPORT_LAG              = 0x844,
+	MLX5_CMD_OP_DESTROY_VPORT_LAG             = 0x845,
 	MLX5_CMD_OP_CREATE_TIR                    = 0x900,
 	MLX5_CMD_OP_MODIFY_TIR                    = 0x901,
 	MLX5_CMD_OP_DESTROY_TIR                   = 0x902,
@@ -477,7 +483,9 @@ struct mlx5_ifc_ads_bits {
 
 struct mlx5_ifc_flow_table_nic_cap_bits {
 	u8         nic_rx_multi_path_tirs[0x1];
-	u8         reserved_at_1[0x1ff];
+	u8         nic_rx_multi_path_tirs_fts[0x1];
+	u8         allow_sniffer_and_nic_rx_shared_tir[0x1];
+	u8         reserved_at_3[0x1fd];
 
 	struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
 
@@ -779,7 +787,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         out_of_seq_cnt[0x1];
 	u8         vport_counters[0x1];
 	u8         retransmission_q_counters[0x1];
-	u8         reserved_at_183[0x3];
+	u8         reserved_at_183[0x1];
+	u8         modify_rq_counter_set_id[0x1];
+	u8         reserved_at_185[0x1];
 	u8         max_qp_cnt[0xa];
 	u8         pkey_table_size[0x10];
 
@@ -882,7 +892,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         pad_tx_eth_packet[0x1];
 	u8         reserved_at_263[0x8];
 	u8         log_bf_reg_size[0x5];
-	u8         reserved_at_270[0x10];
+
+	u8         reserved_at_270[0xb];
+	u8         lag_master[0x1];
+	u8         num_lag_ports[0x4];
 
 	u8         reserved_at_280[0x10];
 	u8         max_wqe_sz_sq[0x10];
@@ -1916,7 +1929,7 @@ enum {
 
 struct mlx5_ifc_qpc_bits {
 	u8         state[0x4];
-	u8         reserved_at_4[0x4];
+	u8         lag_tx_port_affinity[0x4];
 	u8         st[0x8];
 	u8         reserved_at_10[0x3];
 	u8         pm_state[0x2];
@@ -2165,7 +2178,11 @@ struct mlx5_ifc_traffic_counter_bits {
 };
 
 struct mlx5_ifc_tisc_bits {
-	u8         reserved_at_0[0xc];
+	u8         strict_lag_tx_port_affinity[0x1];
+	u8         reserved_at_1[0x3];
+	u8         lag_tx_port_affinity[0x04];
+
+	u8         reserved_at_8[0x4];
 	u8         prio[0x4];
 	u8         reserved_at_10[0x10];
 
@@ -2827,7 +2844,7 @@ struct mlx5_ifc_xrqc_bits {
 
 	struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context;
 
-	u8         reserved_at_180[0x180];
+	u8         reserved_at_180[0x200];
 
 	struct mlx5_ifc_wq_bits wq;
 };
@@ -4615,7 +4632,9 @@ struct mlx5_ifc_modify_tis_out_bits {
 struct mlx5_ifc_modify_tis_bitmask_bits {
 	u8         reserved_at_0[0x20];
 
-	u8         reserved_at_20[0x1f];
+	u8         reserved_at_20[0x1d];
+	u8         lag_tx_port_affinity[0x1];
+	u8         strict_lag_tx_port_affinity[0x1];
 	u8         prio[0x1];
 };
 
@@ -4750,6 +4769,11 @@ struct mlx5_ifc_modify_rq_out_bits {
 	u8         reserved_at_40[0x40];
 };
 
+enum {
+	MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1,
+	MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_MODIFY_RQ_COUNTER_SET_ID = 1ULL << 3,
+};
+
 struct mlx5_ifc_modify_rq_in_bits {
 	u8         opcode[0x10];
 	u8         reserved_at_10[0x10];
@@ -6208,7 +6232,10 @@ struct mlx5_ifc_create_flow_table_in_bits {
 	u8         reserved_at_e0[0x8];
 	u8         table_miss_id[0x18];
 
-	u8         reserved_at_100[0x100];
+	u8         reserved_at_100[0x8];
+	u8         lag_master_next_table_id[0x18];
+
+	u8         reserved_at_120[0x80];
 };
 
 struct mlx5_ifc_create_flow_group_out_bits {
@@ -7662,7 +7689,8 @@ struct mlx5_ifc_set_flow_table_root_in_bits {
 };
 
 enum {
-	MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID = 0x1,
+	MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID     = (1UL << 0),
+	MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID = (1UL << 15),
 };
 
 struct mlx5_ifc_modify_flow_table_out_bits {
@@ -7701,7 +7729,10 @@ struct mlx5_ifc_modify_flow_table_in_bits {
 	u8         reserved_at_e0[0x8];
 	u8         table_miss_id[0x18];
 
-	u8         reserved_at_100[0x100];
+	u8         reserved_at_100[0x8];
+	u8         lag_master_next_table_id[0x18];
+
+	u8         reserved_at_120[0x80];
 };
 
 struct mlx5_ifc_ets_tcn_config_reg_bits {
@@ -7809,4 +7840,134 @@ struct mlx5_ifc_dcbx_param_bits {
 	u8         error[0x8];
 	u8         reserved_at_a0[0x160];
 };
+
+struct mlx5_ifc_lagc_bits {
+	u8         reserved_at_0[0x1d];
+	u8         lag_state[0x3];
+
+	u8         reserved_at_20[0x14];
+	u8         tx_remap_affinity_2[0x4];
+	u8         reserved_at_38[0x4];
+	u8         tx_remap_affinity_1[0x4];
+};
+
+struct mlx5_ifc_create_lag_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_lag_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	struct mlx5_ifc_lagc_bits ctx;
+};
+
+struct mlx5_ifc_modify_lag_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_modify_lag_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x20];
+	u8         field_select[0x20];
+
+	struct mlx5_ifc_lagc_bits ctx;
+};
+
+struct mlx5_ifc_query_lag_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_lagc_bits ctx;
+};
+
+struct mlx5_ifc_query_lag_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_lag_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_lag_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_vport_lag_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_vport_lag_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_vport_lag_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_vport_lag_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
 #endif /* MLX5_IFC_H */
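The mlx5_ifc structs above describe big-endian bit layouts that the MLX5_SET/MLX5_GET macros turn into offsets at compile time. A much-simplified standalone illustration of that packing for the lagc layout; the real macros derive the offsets from the struct definitions, here they are written out by hand.

/* Standalone illustration of MLX5_SET-style big-endian bit packing. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* Set a 'len'-bit field at bit offset 'off', counted from the MSB of dword 0. */
static void set_bits(uint32_t *buf, unsigned off, unsigned len, uint32_t val)
{
	unsigned dw = off / 32, shift = 32 - (off % 32) - len;
	uint32_t mask = ((1U << len) - 1) << shift;
	uint32_t dword = ntohl(buf[dw]);

	dword = (dword & ~mask) | ((val << shift) & mask);
	buf[dw] = htonl(dword);
}

int main(void)
{
	uint32_t lagc[2];

	memset(lagc, 0, sizeof(lagc));
	/* Offsets follow mlx5_ifc_lagc_bits: 0x20+0x14 -> affinity_2,
	 * 0x20+0x14+0x4+0x4 -> affinity_1. */
	set_bits(lagc, 0x3c, 0x4, 1); /* tx_remap_affinity_1 = 1 */
	set_bits(lagc, 0x34, 0x4, 2); /* tx_remap_affinity_2 = 2 */
	printf("dword1 = %#x\n", (unsigned)ntohl(lagc[1])); /* 0x201 */
	return 0;
}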
@@ -61,6 +61,39 @@ enum mlx5_an_status {
 #define MLX5_I2C_ADDR_HIGH		0x51
 #define MLX5_EEPROM_PAGE_LENGTH		256
 
+enum mlx5e_link_mode {
+	MLX5E_1000BASE_CX_SGMII	 = 0,
+	MLX5E_1000BASE_KX	 = 1,
+	MLX5E_10GBASE_CX4	 = 2,
+	MLX5E_10GBASE_KX4	 = 3,
+	MLX5E_10GBASE_KR	 = 4,
+	MLX5E_20GBASE_KR2	 = 5,
+	MLX5E_40GBASE_CR4	 = 6,
+	MLX5E_40GBASE_KR4	 = 7,
+	MLX5E_56GBASE_R4	 = 8,
+	MLX5E_10GBASE_CR	 = 12,
+	MLX5E_10GBASE_SR	 = 13,
+	MLX5E_10GBASE_ER	 = 14,
+	MLX5E_40GBASE_SR4	 = 15,
+	MLX5E_40GBASE_LR4	 = 16,
+	MLX5E_50GBASE_SR2	 = 18,
+	MLX5E_100GBASE_CR4	 = 20,
+	MLX5E_100GBASE_SR4	 = 21,
+	MLX5E_100GBASE_KR4	 = 22,
+	MLX5E_100GBASE_LR4	 = 23,
+	MLX5E_100BASE_TX	 = 24,
+	MLX5E_1000BASE_T	 = 25,
+	MLX5E_10GBASE_T		 = 26,
+	MLX5E_25GBASE_CR	 = 27,
+	MLX5E_25GBASE_KR	 = 28,
+	MLX5E_25GBASE_SR	 = 29,
+	MLX5E_50GBASE_CR2	 = 30,
+	MLX5E_50GBASE_KR2	 = 31,
+	MLX5E_LINK_MODES_NUMBER,
+};
+
+#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+
 int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
 int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
 			 int ptys_size, int proto_mask, u8 local_port);
@@ -70,9 +103,10 @@ int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
 				u32 *proto_admin, int proto_mask);
 int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
 				    u8 *link_width_oper, u8 local_port);
-int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
-			       u8 *proto_oper, int proto_mask,
-			       u8 local_port);
+int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
+				  u8 *proto_oper, u8 local_port);
+int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev,
+				   u32 *proto_oper, u8 local_port);
 int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
 		       u32 proto_admin, int proto_mask);
 void mlx5_toggle_port_link(struct mlx5_core_dev *dev);