net/mlx5: Lag, expose number of lag ports

Downstream patches will add support for hardware lag with
more than 2 ports. Add a way for users to query the number of lag ports.

Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>

Author:    Mark Bloch <mbloch@nvidia.com>
Date:      2022-03-01 15:42:01 +00:00
Committer: Saeed Mahameed <saeedm@nvidia.com>
Commit:    34a30d7635
Parent:    37ca95e62e
6 changed files with 11 additions and 2 deletions
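
For illustration only, a minimal sketch of how a consumer might cache the value
reported by the new helper at init time, mirroring the mlx5_ib change in this
commit; "my_dev" and its fields are made-up names for the example, not part of
the mlx5 API:

	/* Illustrative sketch, not part of this commit: cache the lag port
	 * count once at init time. mlx5_lag_get_num_ports() currently just
	 * returns MLX5_MAX_PORTS, but callers should not rely on that once
	 * lag with more than 2 ports is supported.
	 */
	#include <linux/mlx5/driver.h>

	struct my_dev {
		struct mlx5_core_dev *mdev;
		u8 lag_ports;
	};

	static void my_dev_init_lag(struct my_dev *dev)
	{
		dev->lag_ports = mlx5_lag_get_num_ports(dev->mdev);
	}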

@@ -100,7 +100,7 @@ int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
 				 port_type) == MLX5_CAP_PORT_TYPE_IB)
 			num_qps = pd->device->attrs.max_pkeys;
 		else if (dev->lag_active)
-			num_qps = MLX5_MAX_PORTS;
+			num_qps = dev->lag_ports;
 	}
 
 	gsi = &mqp->gsi;

@@ -2991,6 +2991,7 @@ static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
 	}
 
 	dev->flow_db->lag_demux_ft = ft;
+	dev->lag_ports = mlx5_lag_get_num_ports(mdev);
 	dev->lag_active = true;
 	return 0;

@@ -1131,6 +1131,7 @@ struct mlx5_ib_dev {
 	struct xarray		sig_mrs;
 	struct mlx5_port_caps	port_caps[MLX5_MAX_PORTS];
 	u16 pkey_table_len;
+	u8 lag_ports;
 };
 
 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)

@@ -3907,7 +3907,7 @@ static unsigned int get_tx_affinity_rr(struct mlx5_ib_dev *dev,
 		tx_port_affinity = &dev->port[port_num].roce.tx_port_affinity;
 
 	return (unsigned int)atomic_add_return(1, tx_port_affinity) %
-		MLX5_MAX_PORTS + 1;
+		(dev->lag_active ? dev->lag_ports : MLX5_CAP_GEN(dev->mdev, num_lag_ports)) + 1;
 }
 
 static bool qp_supports_affinity(struct mlx5_ib_qp *qp)
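
For reference, a small userspace-only sketch of the round-robin arithmetic used
in get_tx_affinity_rr() above; the port count of 4 is an arbitrary example value,
and the plain counter stands in for the atomic tx_port_affinity:

	#include <stdio.h>

	int main(void)
	{
		unsigned int tx_port_affinity = 0;	/* stands in for the atomic counter */
		unsigned int lag_ports = 4;		/* e.g. dev->lag_ports on a hypothetical 4-port lag */
		int i;

		for (i = 0; i < 8; i++) {
			/* ++ mimics atomic_add_return(1, ...); % binds tighter than +,
			 * so the result cycles through the 1-based ports 1..lag_ports.
			 */
			unsigned int port = ++tx_port_affinity % lag_ports + 1;
			printf("QP %d -> tx affinity port %u\n", i, port);
		}
		return 0;
	}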

@@ -1185,6 +1185,12 @@ unlock:
 }
 EXPORT_SYMBOL(mlx5_lag_get_slave_port);
 
+u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev)
+{
+	return MLX5_MAX_PORTS;
+}
+EXPORT_SYMBOL(mlx5_lag_get_num_ports);
+
 struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
 {
 	struct mlx5_core_dev *peer_dev = NULL;

@@ -1142,6 +1142,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
 				 int num_counters,
 				 size_t *offsets);
 struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev);
+u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev);
 struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
 void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
 int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,