IB/mlx5: Support per device q counters in switchdev mode

When parent mlx5_core_dev is in switchdev mode, q_counters are not
applicable to multiple non-uplink vports.
Hence, limit them to the device level.

While at it, correct __mlx5_ib_qp_set_counter() and
__mlx5_ib_modify_qp() to use u16 set_id as defined by the device.

Signed-off-by: Parav Pandit <parav@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Link: https://lore.kernel.org/r/20190723073117.7175-3-leon@kernel.org
Signed-off-by: Doug Ledford <dledford@redhat.com>
Parav Pandit, 2019-07-23 10:31:17 +03:00, committed by Doug Ledford
parent 5dcecbc967, commit 3e1f000ff7
3 changed files with 60 additions and 21 deletions

drivers/infiniband/hw/mlx5/main.c

@@ -5322,11 +5322,21 @@ static const struct mlx5_ib_counter ext_ppcnt_cnts[] = {
 	INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated),
 };
 
+static bool is_mdev_switchdev_mode(const struct mlx5_core_dev *mdev)
+{
+	return MLX5_ESWITCH_MANAGER(mdev) &&
+	       mlx5_ib_eswitch_mode(mdev->priv.eswitch) ==
+		       MLX5_ESWITCH_OFFLOADS;
+}
+
 static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
 {
+	int num_cnt_ports;
 	int i;
 
-	for (i = 0; i < dev->num_ports; i++) {
+	num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
+
+	for (i = 0; i < num_cnt_ports; i++) {
 		if (dev->port[i].cnts.set_id_valid)
 			mlx5_core_dealloc_q_counter(dev->mdev,
 						    dev->port[i].cnts.set_id);
@@ -5428,13 +5438,15 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
 
 static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
 {
+	int num_cnt_ports;
 	int err = 0;
 	int i;
 	bool is_shared;
 
 	is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0;
+	num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
 
-	for (i = 0; i < dev->num_ports; i++) {
+	for (i = 0; i < num_cnt_ports; i++) {
 		err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts);
 		if (err)
 			goto err_alloc;
@@ -5454,7 +5466,6 @@ static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
 		}
 		dev->port[i].cnts.set_id_valid = true;
 	}
-
 	return 0;
 
 err_alloc:
@@ -5462,16 +5473,41 @@
 	return err;
 }
 
+static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev,
+						   u8 port_num)
+{
+	return is_mdev_switchdev_mode(dev->mdev) ? &dev->port[0].cnts :
+						   &dev->port[port_num].cnts;
+}
+
+/**
+ * mlx5_ib_get_counters_id - Returns counters id to use for device+port
+ * @dev:	Pointer to mlx5 IB device
+ * @port_num:	Zero based port number
+ *
+ * mlx5_ib_get_counters_id() Returns counters set id to use for given
+ * device port combination in switchdev and non switchdev mode of the
+ * parent device.
+ */
+u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num)
+{
+	const struct mlx5_ib_counters *cnts = get_counters(dev, port_num);
+
+	return cnts->set_id;
+}
+
 static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
 						    u8 port_num)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
-	const struct mlx5_ib_counters *cnts = &dev->port[port_num - 1].cnts;
+	const struct mlx5_ib_counters *cnts;
+	bool is_switchdev = is_mdev_switchdev_mode(dev->mdev);
 
-	/* We support only per port stats */
-	if (port_num == 0)
+	if ((is_switchdev && port_num) || (!is_switchdev && !port_num))
 		return NULL;
 
+	cnts = get_counters(dev, port_num - 1);
+
 	return rdma_alloc_hw_stats_struct(cnts->names,
 					  cnts->num_q_counters +
 					  cnts->num_cong_counters +
@@ -5538,7 +5574,7 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
 				    u8 port_num, int index)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
-	struct mlx5_ib_counters *cnts = &dev->port[port_num - 1].cnts;
+	const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1);
 	struct mlx5_core_dev *mdev;
 	int ret, num_counters;
 	u8 mdev_port_num;
@@ -5592,7 +5628,7 @@ mlx5_ib_counter_alloc_stats(struct rdma_counter *counter)
 {
 	struct mlx5_ib_dev *dev = to_mdev(counter->device);
 	const struct mlx5_ib_counters *cnts =
-		&dev->port[counter->port - 1].cnts;
+		get_counters(dev, counter->port - 1);
 
 	/* Q counters are in the beginning of all counters */
 	return rdma_alloc_hw_stats_struct(cnts->names,
@@ -5603,7 +5639,8 @@ mlx5_ib_counter_alloc_stats(struct rdma_counter *counter)
 static int mlx5_ib_counter_update_stats(struct rdma_counter *counter)
 {
 	struct mlx5_ib_dev *dev = to_mdev(counter->device);
-	struct mlx5_ib_counters *cnts = &dev->port[counter->port - 1].cnts;
+	const struct mlx5_ib_counters *cnts =
+		get_counters(dev, counter->port - 1);
 
 	return mlx5_ib_query_q_counters(dev->mdev, cnts,
 					counter->stats, counter->id);

drivers/infiniband/hw/mlx5/mlx5_ib.h

@@ -1474,4 +1474,5 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
 			bool dyn_bfreg);
 
 int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
+u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num);
 #endif /* MLX5_IB_H */

drivers/infiniband/hw/mlx5/qp.c

@@ -3387,19 +3387,16 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
 	struct mlx5_ib_qp *mqp = to_mqp(qp);
 	struct mlx5_qp_context context = {};
-	struct mlx5_ib_port *mibport = NULL;
 	struct mlx5_ib_qp_base *base;
 	u32 set_id;
 
 	if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id))
 		return 0;
 
-	if (counter) {
+	if (counter)
 		set_id = counter->id;
-	} else {
-		mibport = &dev->port[mqp->port - 1];
-		set_id = mibport->cnts.set_id;
-	}
+	else
+		set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1);
 
 	base = &mqp->trans_qp.base;
 	context.qp_counter_set_usr_page &= cpu_to_be32(0xffffff);
@@ -3460,7 +3457,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	struct mlx5_ib_cq *send_cq, *recv_cq;
 	struct mlx5_qp_context *context;
 	struct mlx5_ib_pd *pd;
-	struct mlx5_ib_port *mibport = NULL;
 	enum mlx5_qp_state mlx5_cur, mlx5_new;
 	enum mlx5_qp_optpar optpar;
 	u32 set_id = 0;
@@ -3625,11 +3621,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 		if (qp->flags & MLX5_IB_QP_UNDERLAY)
 			port_num = 0;
 
-		mibport = &dev->port[port_num];
 		if (ibqp->counter)
 			set_id = ibqp->counter->id;
 		else
-			set_id = mibport->cnts.set_id;
+			set_id = mlx5_ib_get_counters_id(dev, port_num);
 		context->qp_counter_set_usr_page |=
 			cpu_to_be32(set_id << 24);
 	}
@@ -3818,6 +3813,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
 
 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+		u16 set_id;
+
 		required |= IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
 		if (!is_valid_mask(attr_mask, required, 0))
 			return -EINVAL;
@@ -3844,7 +3841,9 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		}
 		MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index);
 		MLX5_SET(dctc, dctc, port, attr->port_num);
-		MLX5_SET(dctc, dctc, counter_set_id, dev->port[attr->port_num - 1].cnts.set_id);
+
+		set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
+		MLX5_SET(dctc, dctc, counter_set_id, set_id);
 	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
 		struct mlx5_ib_modify_qp_resp resp = {};
 
@@ -6328,11 +6327,13 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
 	}
 
 	if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) {
+		u16 set_id;
+
+		set_id = mlx5_ib_get_counters_id(dev, 0);
 		if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
 			MLX5_SET64(modify_rq_in, in, modify_bitmask,
 				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
-			MLX5_SET(rqc, rqc, counter_set_id,
-				 dev->port->cnts.set_id);
+			MLX5_SET(rqc, rqc, counter_set_id, set_id);
 		} else
 			dev_info_once(
 				&dev->ib_dev.dev,