net/mlx5: Allocate completion EQs dynamically

This commit enables dynamic allocation of completion EQs at runtime,
allowing for more flexibility in managing them and reducing the memory
overhead at driver load. Whenever a CQ is created for a given vector
index, the driver looks up whether a completion EQ is already mapped to
that vector and, if so, uses it. Otherwise, it allocates a new EQ on
demand and uses it for the CQ's completion events.
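
In practice the lookup-or-allocate path boils down to the sketch below,
a condensed restatement of the new mlx5_comp_eqn_get() from the hunk
further down (all names come from the diff; the goto is flattened for
readability):

  int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn)
  {
          struct mlx5_eq_table *table = dev->priv.eq_table;
          struct mlx5_eq_comp *eq;
          int ret;

          /* Serialize against concurrent on-demand EQ creation */
          mutex_lock(&table->comp_lock);
          eq = xa_load(&table->comp_eqs, vecidx);
          if (eq) {
                  /* An EQ is already mapped to this vector - reuse it */
                  *eqn = eq->core.eqn;
                  mutex_unlock(&table->comp_lock);
                  return 0;
          }

          /* No EQ yet - create one; on success it returns the new EQN */
          ret = create_comp_eq(dev, vecidx);
          mutex_unlock(&table->comp_lock);
          if (ret < 0)
                  return ret;

          *eqn = ret;
          return 0;
  }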

Add a lock to the EQ table to protect against concurrent EQ creation
attempts.

While at it, replace mlx5_vector2eqn()/mlx5_vector2irqn() with
mlx5_comp_eqn_get() and mlx5_comp_irqn_get(), which allocate an EQ on
demand if no EQ is found for the given vector.
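
Call sites convert mechanically; roughly (based on the mlx5e_create_cq
and mlx5e_open_channel hunks below):

  /* before: returned -ENOENT unless the EQ was pre-allocated at load time */
  err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn);

  /* after: reuses the EQ mapped to this vector or allocates one on demand */
  err = mlx5_comp_eqn_get(mdev, param->eq_ix, &eqn);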

Signed-off-by: Maher Sanalla <msanalla@nvidia.com>
Reviewed-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Author: Maher Sanalla, 2023-06-12 10:13:50 +03:00
Committer: Saeed Mahameed
parent 54c5297801
commit f14c1a14e6
11 changed files with 54 additions and 48 deletions

@@ -993,7 +993,7 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
}
err = mlx5_vector2eqn(dev->mdev, vector, &eqn);
err = mlx5_comp_eqn_get(dev->mdev, vector, &eqn);
if (err)
goto err_cqb;

@@ -1002,7 +1002,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
return PTR_ERR(c);
dev = to_mdev(c->ibucontext.device);
err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn);
err = mlx5_comp_eqn_get(dev->mdev, user_vector, &dev_eqn);
if (err < 0)
return err;

@@ -1989,7 +1989,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
int eqn;
int err;
err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn);
err = mlx5_comp_eqn_get(mdev, param->eq_ix, &eqn);
if (err)
return err;
@@ -2452,7 +2452,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
unsigned int irq;
int err;
err = mlx5_vector2irqn(priv->mdev, ix, &irq);
err = mlx5_comp_irqn_get(priv->mdev, ix, &irq);
if (err)
return err;

@@ -58,6 +58,7 @@ struct mlx5_eq_table {
struct mlx5_nb cq_err_nb;
struct mutex lock; /* sync async eqs creations */
struct mutex comp_lock; /* sync comp eqs creations */
int curr_comp_eqs;
int max_comp_eqs;
struct mlx5_irq_table *irq_table;
@@ -457,6 +458,7 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev)
cpumask_clear(&eq_table->used_cpus);
xa_init(&eq_table->comp_eqs);
xa_init(&eq_table->comp_irqs);
mutex_init(&eq_table->comp_lock);
eq_table->curr_comp_eqs = 0;
return 0;
}
@@ -985,6 +987,7 @@ static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
return MLX5_COMP_EQ_SIZE;
}
/* Must be called with EQ table comp_lock held */
static int create_comp_eq(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
@@ -994,6 +997,13 @@ static int create_comp_eq(struct mlx5_core_dev *dev, u16 vecidx)
int nent;
int err;
lockdep_assert_held(&table->comp_lock);
if (table->curr_comp_eqs == table->max_comp_eqs) {
mlx5_core_err(dev, "maximum number of vectors is allocated, %d\n",
table->max_comp_eqs);
return -ENOMEM;
}
err = comp_irq_request(dev, vecidx);
if (err)
return err;
@@ -1033,7 +1043,7 @@ static int create_comp_eq(struct mlx5_core_dev *dev, u16 vecidx)
goto disable_eq;
table->curr_comp_eqs++;
return 0;
return eq->core.eqn;
disable_eq:
mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
@@ -1044,32 +1054,47 @@ clean_irq:
return err;
}
static int vector2eqnirqn(struct mlx5_core_dev *dev, int vector, int *eqn,
unsigned int *irqn)
int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_eq_comp *eq;
int ret = 0;
eq = xa_load(&table->comp_eqs, vector);
if (!eq)
return -ENOENT;
if (irqn)
*irqn = eq->core.irqn;
if (eqn)
mutex_lock(&table->comp_lock);
eq = xa_load(&table->comp_eqs, vecidx);
if (eq) {
*eqn = eq->core.eqn;
goto out;
}
ret = create_comp_eq(dev, vecidx);
if (ret < 0) {
mutex_unlock(&table->comp_lock);
return ret;
}
*eqn = ret;
out:
mutex_unlock(&table->comp_lock);
return 0;
}
EXPORT_SYMBOL(mlx5_comp_eqn_get);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn)
int mlx5_comp_irqn_get(struct mlx5_core_dev *dev, int vector, unsigned int *irqn)
{
return vector2eqnirqn(dev, vector, eqn, NULL);
}
EXPORT_SYMBOL(mlx5_vector2eqn);
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_eq_comp *eq;
int eqn;
int err;
int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn)
{
return vector2eqnirqn(dev, vector, NULL, irqn);
/* Allocate the EQ if not allocated yet */
err = mlx5_comp_eqn_get(dev, vector, &eqn);
if (err)
return err;
eq = xa_load(&table->comp_eqs, vector);
*irqn = eq->core.irqn;
return 0;
}
unsigned int mlx5_comp_vectors_max(struct mlx5_core_dev *dev)
@@ -1119,10 +1144,9 @@ struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
struct mlx5_eq_comp *eq;
unsigned long index;
xa_for_each(&table->comp_eqs, index, eq) {
xa_for_each(&table->comp_eqs, index, eq)
if (eq->core.eqn == eqn)
return eq;
}
return ERR_PTR(-ENOENT);
}
@@ -1130,11 +1154,7 @@ struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
mlx5_irq_table_free_irqs(dev);
mutex_unlock(&table->lock);
}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
@@ -1176,7 +1196,6 @@ int mlx5_eq_table_create(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *eq_table = dev->priv.eq_table;
int err;
int i;
eq_table->max_comp_eqs = get_num_eqs(dev);
err = create_async_eqs(dev);
@@ -1191,21 +1210,8 @@ int mlx5_eq_table_create(struct mlx5_core_dev *dev)
goto err_rmap;
}
for (i = 0; i < eq_table->max_comp_eqs; i++) {
err = create_comp_eq(dev, i);
if (err < 0)
break;
}
if (!i) {
mlx5_core_err(dev, "Failed to create completion EQs\n");
goto err_comp_eqs;
}
return 0;
err_comp_eqs:
free_rmap(dev);
err_rmap:
destroy_async_eqs(dev);
err_async_eqs:

@@ -445,7 +445,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
goto err_cqwq;
}
err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn);
err = mlx5_comp_eqn_get(mdev, smp_processor_id(), &eqn);
if (err) {
kvfree(in);
goto err_cqwq;

@@ -81,7 +81,7 @@ static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
int inlen, eqn;
int err;
err = mlx5_vector2eqn(mdev, 0, &eqn);
err = mlx5_comp_eqn_get(mdev, 0, &eqn);
if (err)
return err;

@@ -104,6 +104,6 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
#endif
int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn);
int mlx5_comp_irqn_get(struct mlx5_core_dev *dev, int vector, unsigned int *irqn);
#endif

@@ -1097,7 +1097,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
goto err_cqwq;
vector = raw_smp_processor_id() % mlx5_comp_vectors_max(mdev);
err = mlx5_vector2eqn(mdev, vector, &eqn);
err = mlx5_comp_eqn_get(mdev, vector, &eqn);
if (err) {
kvfree(in);
goto err_cqwq;

@@ -580,7 +580,7 @@ static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent)
/* Use vector 0 by default. Consider adding code to choose least used
* vector.
*/
err = mlx5_vector2eqn(mdev, 0, &eqn);
err = mlx5_comp_eqn_get(mdev, 0, &eqn);
if (err)
goto err_vec;

@@ -1026,7 +1026,7 @@ static int mlx5vf_create_cq(struct mlx5_core_dev *mdev,
}
vector = raw_smp_processor_id() % mlx5_comp_vectors_max(mdev);
err = mlx5_vector2eqn(mdev, vector, &eqn);
err = mlx5_comp_eqn_get(mdev, vector, &eqn);
if (err)
goto err_vec;

@@ -1058,7 +1058,7 @@ void mlx5_unregister_debugfs(void);
void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn);
int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);