bnxt_en: Remove runtime interrupt vector allocation

Modify the bnxt_en code to create and pre-configure RDMA devices
with the right MSI-X vector count for the RoCE driver to use.
This aligns the RoCE driver with the auxiliary device model, which
simply binds the driver without getting into PCI-related handling.
All PCI-related logic now lives in the bnxt_en driver.

Suggested-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Ajit Khaparde, 2022-12-12 10:36:29 -08:00
commit 3034322113 (parent a43c26fa2e)
4 changed files with 68 additions and 167 deletions
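
In outline: bnxt_en now sizes and fills the MSI-X entries itself and
publishes them through struct bnxt_en_dev, so the RoCE driver reads a
pre-configured count instead of requesting vectors at probe time. A
minimal before/after sketch of the consumer side (the two helper
functions below are illustrative, not code from this patch; the fields
and constants are the ones touched in the diff):

/* Illustrative sketch only -- condensed from the diff below. */

/* Before: vectors were requested from bnxt_en at probe time, and the
 * RoCE driver had to handle partial or failed allocation itself.
 */
static int bnxt_re_setup_msix_old(struct bnxt_re_dev *rdev)
{
	int want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());
	int got = bnxt_req_msix_vecs(rdev->en_dev, rdev->msix_entries, want);

	if (got < BNXT_RE_MIN_MSIX)
		return -EINVAL;		/* runtime allocation can fail */
	rdev->num_msix = got;
	return 0;
}

/* After: bnxt_en fills edev->msix_entries[] and ulp_tbl->msix_requested
 * before the auxiliary device is created; the RoCE driver only reads
 * the pre-configured count.
 */
static int bnxt_re_setup_msix_new(struct bnxt_re_dev *rdev)
{
	if (!rdev->en_dev->ulp_tbl->msix_requested)
		return -EINVAL;		/* L2 driver reserved nothing */
	rdev->num_msix = rdev->en_dev->ulp_tbl->msix_requested;
	return 0;
}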

drivers/infiniband/hw/bnxt_re/bnxt_re.h

@@ -129,7 +129,6 @@ struct bnxt_re_dev {
 	unsigned int version, major, minor;
 	struct bnxt_qplib_chip_ctx *chip_ctx;
 	struct bnxt_en_dev *en_dev;
-	struct bnxt_msix_entry msix_entries[BNXT_RE_MAX_MSIX];
 	int num_msix;
 
 	int id;

drivers/infiniband/hw/bnxt_re/main.c

@@ -262,7 +262,7 @@ static void bnxt_re_stop_irq(void *handle)
 static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
 {
 	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
-	struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
+	struct bnxt_msix_entry *msix_ent = rdev->en_dev->msix_entries;
 	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
 	struct bnxt_qplib_nq *nq;
 	int indx, rc;
@@ -281,7 +281,7 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
 	 * in device structure.
 	 */
 	for (indx = 0; indx < rdev->num_msix; indx++)
-		rdev->msix_entries[indx].vector = ent[indx].vector;
+		rdev->en_dev->msix_entries[indx].vector = ent[indx].vector;
 
 	bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
 				  false);
@@ -315,32 +315,6 @@ static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
 	return rc;
 }
 
-static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
-{
-	int rc = 0, num_msix_want = BNXT_RE_MAX_MSIX, num_msix_got;
-	struct bnxt_en_dev *en_dev;
-
-	en_dev = rdev->en_dev;
-
-	num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());
-
-	num_msix_got = bnxt_req_msix_vecs(en_dev,
-					  rdev->msix_entries,
-					  num_msix_want);
-	if (num_msix_got < BNXT_RE_MIN_MSIX) {
-		rc = -EINVAL;
-		goto done;
-	}
-	if (num_msix_got != num_msix_want) {
-		ibdev_warn(&rdev->ibdev,
-			   "Requested %d MSI-X vectors, got %d\n",
-			   num_msix_want, num_msix_got);
-	}
-	rdev->num_msix = num_msix_got;
-done:
-	return rc;
-}
-
 static void bnxt_re_init_hwrm_hdr(struct bnxt_re_dev *rdev, struct input *hdr,
 				  u16 opcd, u16 crid, u16 trid)
 {
@@ -785,7 +759,7 @@ static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
 	return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
 		(rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
 				   BNXT_RE_GEN_P5_PF_NQ_DB) :
-		rdev->msix_entries[indx].db_offset;
+		rdev->en_dev->msix_entries[indx].db_offset;
 }
 
 static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
@@ -810,7 +784,7 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
 	for (i = 1; i < rdev->num_msix ; i++) {
 		db_offt = bnxt_re_get_nqdb_offset(rdev, i);
 		rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
-					  i - 1, rdev->msix_entries[i].vector,
+					  i - 1, rdev->en_dev->msix_entries[i].vector,
 					  db_offt, &bnxt_re_cqn_handler,
 					  &bnxt_re_srqn_handler);
 		if (rc) {
@@ -897,7 +871,7 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
 		rattr.type = type;
 		rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
 		rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1;
-		rattr.lrid = rdev->msix_entries[i + 1].ring_idx;
+		rattr.lrid = rdev->en_dev->msix_entries[i + 1].ring_idx;
 		rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
 		if (rc) {
 			ibdev_err(&rdev->ibdev,
@@ -1217,7 +1191,7 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
 		bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
 	}
 	if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags))
-		bnxt_free_msix_vecs(rdev->en_dev);
+		rdev->num_msix = 0;
 
 	bnxt_re_destroy_chip_ctx(rdev);
 	if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags))
@@ -1262,13 +1236,15 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
 	/* Check whether VF or PF */
 	bnxt_re_get_sriov_func_type(rdev);
 
-	rc = bnxt_re_request_msix(rdev);
-	if (rc) {
+	if (!rdev->en_dev->ulp_tbl->msix_requested) {
 		ibdev_err(&rdev->ibdev,
 			  "Failed to get MSI-X vectors: %#x\n", rc);
 		rc = -EINVAL;
 		goto fail;
 	}
+	ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n",
+		  rdev->en_dev->ulp_tbl->msix_requested);
+	rdev->num_msix = rdev->en_dev->ulp_tbl->msix_requested;
 	set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags);
 
 	bnxt_re_query_hwrm_intf_version(rdev);
@@ -1292,14 +1268,14 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
 	rattr.type = type;
 	rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
 	rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1;
-	rattr.lrid = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
+	rattr.lrid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
 	rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
 	if (rc) {
 		ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
 		goto free_rcfw;
 	}
 	db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
-	vid = rdev->msix_entries[BNXT_RE_AEQ_IDX].vector;
+	vid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].vector;
 	rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
 					    vid, db_offt, rdev->is_virtfn,
 					    &bnxt_re_aeq_handler);

drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c

@@ -28,68 +28,15 @@
 
 static DEFINE_IDA(bnxt_aux_dev_ids);
 
-int bnxt_register_dev(struct bnxt_en_dev *edev,
-		      struct bnxt_ulp_ops *ulp_ops,
-		      void *handle)
-{
-	struct net_device *dev = edev->net;
-	struct bnxt *bp = netdev_priv(dev);
-	unsigned int max_stat_ctxs;
-	struct bnxt_ulp *ulp;
-
-	max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
-	if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
-	    bp->cp_nr_rings == max_stat_ctxs)
-		return -ENOMEM;
-
-	ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
-	if (!ulp)
-		return -ENOMEM;
-
-	edev->ulp_tbl = ulp;
-	ulp->handle = handle;
-	rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
-
-	if (test_bit(BNXT_STATE_OPEN, &bp->state))
-		bnxt_hwrm_vnic_cfg(bp, 0);
-
-	return 0;
-}
-EXPORT_SYMBOL(bnxt_register_dev);
-
-void bnxt_unregister_dev(struct bnxt_en_dev *edev)
-{
-	struct net_device *dev = edev->net;
-	struct bnxt *bp = netdev_priv(dev);
-	struct bnxt_ulp *ulp;
-	int i = 0;
-
-	ulp = edev->ulp_tbl;
-	if (ulp->msix_requested)
-		bnxt_free_msix_vecs(edev);
-
-	if (ulp->max_async_event_id)
-		bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
-
-	RCU_INIT_POINTER(ulp->ulp_ops, NULL);
-	synchronize_rcu();
-	ulp->max_async_event_id = 0;
-	ulp->async_events_bmap = NULL;
-	while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
-		msleep(100);
-		i++;
-	}
-	kfree(ulp);
-	edev->ulp_tbl = NULL;
-	return;
-}
-EXPORT_SYMBOL(bnxt_unregister_dev);
-
 static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
 {
 	struct bnxt_en_dev *edev = bp->edev;
 	int num_msix, idx, i;
 
+	if (!edev->ulp_tbl->msix_requested) {
+		netdev_warn(bp->dev, "Requested MSI-X vectors insufficient\n");
+		return;
+	}
+
 	num_msix = edev->ulp_tbl->msix_requested;
 	idx = edev->ulp_tbl->msix_base;
 	for (i = 0; i < num_msix; i++) {
@@ -105,99 +52,69 @@ static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
 	}
 }
 
-int bnxt_req_msix_vecs(struct bnxt_en_dev *edev,
-		       struct bnxt_msix_entry *ent,
-		       int num_msix)
+int bnxt_register_dev(struct bnxt_en_dev *edev,
+		      struct bnxt_ulp_ops *ulp_ops,
+		      void *handle)
 {
 	struct net_device *dev = edev->net;
 	struct bnxt *bp = netdev_priv(dev);
-	struct bnxt_hw_resc *hw_resc;
-	int max_idx, max_cp_rings;
-	int avail_msix, idx;
-	int total_vecs;
-	int rc = 0;
+	unsigned int max_stat_ctxs;
+	struct bnxt_ulp *ulp;
 
-	if (!(bp->flags & BNXT_FLAG_USING_MSIX))
-		return -ENODEV;
-
-	if (edev->ulp_tbl->msix_requested)
-		return -EAGAIN;
-
-	max_cp_rings = bnxt_get_max_func_cp_rings(bp);
-	avail_msix = bnxt_get_avail_msix(bp, num_msix);
-	if (!avail_msix)
+	max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
+	if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
+	    bp->cp_nr_rings == max_stat_ctxs)
 		return -ENOMEM;
-	if (avail_msix > num_msix)
-		avail_msix = num_msix;
-
-	if (BNXT_NEW_RM(bp)) {
-		idx = bp->cp_nr_rings;
-	} else {
-		max_idx = min_t(int, bp->total_irqs, max_cp_rings);
-		idx = max_idx - avail_msix;
-	}
-	edev->ulp_tbl->msix_base = idx;
-	edev->ulp_tbl->msix_requested = avail_msix;
-	hw_resc = &bp->hw_resc;
-	total_vecs = idx + avail_msix;
-	rtnl_lock();
-	if (bp->total_irqs < total_vecs ||
-	    (BNXT_NEW_RM(bp) && hw_resc->resv_irqs < total_vecs)) {
-		if (netif_running(dev)) {
-			bnxt_close_nic(bp, true, false);
-			rc = bnxt_open_nic(bp, true, false);
-		} else {
-			rc = bnxt_reserve_rings(bp, true);
-		}
-	}
-	rtnl_unlock();
-	if (rc) {
-		edev->ulp_tbl->msix_requested = 0;
-		return -EAGAIN;
-	}
+	ulp = edev->ulp_tbl;
+	if (!ulp)
+		return -ENOMEM;
 
-	if (BNXT_NEW_RM(bp)) {
-		int resv_msix;
+	ulp->handle = handle;
+	rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
 
-		resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
-		avail_msix = min_t(int, resv_msix, avail_msix);
-		edev->ulp_tbl->msix_requested = avail_msix;
-	}
-	bnxt_fill_msix_vecs(bp, ent);
+	if (test_bit(BNXT_STATE_OPEN, &bp->state))
+		bnxt_hwrm_vnic_cfg(bp, 0);
+	bnxt_fill_msix_vecs(bp, bp->edev->msix_entries);
 	edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
-	return avail_msix;
+	return 0;
 }
-EXPORT_SYMBOL(bnxt_req_msix_vecs);
+EXPORT_SYMBOL(bnxt_register_dev);
 
-void bnxt_free_msix_vecs(struct bnxt_en_dev *edev)
+void bnxt_unregister_dev(struct bnxt_en_dev *edev)
 {
 	struct net_device *dev = edev->net;
 	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_ulp *ulp;
+	int i = 0;
 
-	if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
-		return;
-
-	edev->ulp_tbl->msix_requested = 0;
-	edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
-	rtnl_lock();
-	if (netif_running(dev) && !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED)) {
-		bnxt_close_nic(bp, true, false);
-		bnxt_open_nic(bp, true, false);
-	}
-	rtnl_unlock();
+	ulp = edev->ulp_tbl;
+	if (ulp->msix_requested)
+		edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
 
+	if (ulp->max_async_event_id)
+		bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
+
+	RCU_INIT_POINTER(ulp->ulp_ops, NULL);
+	synchronize_rcu();
+	ulp->max_async_event_id = 0;
+	ulp->async_events_bmap = NULL;
+	while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
+		msleep(100);
+		i++;
+	}
 	return;
 }
-EXPORT_SYMBOL(bnxt_free_msix_vecs);
+EXPORT_SYMBOL(bnxt_unregister_dev);
 
 int bnxt_get_ulp_msix_num(struct bnxt *bp)
 {
-	if (bnxt_ulp_registered(bp->edev)) {
-		struct bnxt_en_dev *edev = bp->edev;
+	u32 roce_msix = BNXT_VF(bp) ?
+			BNXT_MAX_VF_ROCE_MSIX : BNXT_MAX_ROCE_MSIX;
 
-		return edev->ulp_tbl->msix_requested;
-	}
-	return 0;
+	return ((bp->flags & BNXT_FLAG_ROCE_CAP) ?
+		min_t(u32, roce_msix, num_online_cpus()) : 0);
 }
 
 int bnxt_get_ulp_msix_base(struct bnxt *bp)
@@ -402,6 +319,7 @@ static void bnxt_aux_dev_release(struct device *dev)
 		container_of(dev, struct bnxt_aux_priv, aux_dev.dev);
 
 	ida_free(&bnxt_aux_dev_ids, aux_priv->id);
+	kfree(aux_priv->edev->ulp_tbl);
 	kfree(aux_priv->edev);
 	kfree(aux_priv);
 }
@@ -424,6 +342,8 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
 	edev->hw_ring_stats_size = bp->hw_ring_stats_size;
 	edev->pf_port_id = bp->pf.port_id;
 	edev->en_state = bp->state;
+
+	edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
 }
@@ -431,6 +351,7 @@ void bnxt_rdma_aux_device_init(struct bnxt *bp)
 	struct auxiliary_device *aux_dev;
 	struct bnxt_aux_priv *aux_priv;
 	struct bnxt_en_dev *edev;
+	struct bnxt_ulp *ulp;
 	int rc;
 
 	if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
@@ -470,6 +391,11 @@ void bnxt_rdma_aux_device_init(struct bnxt *bp)
 	if (!edev)
 		goto aux_dev_uninit;
 
+	ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
+	if (!ulp)
+		goto aux_dev_uninit;
+	edev->ulp_tbl = ulp;
+
 	aux_priv->edev = edev;
 	bp->edev = edev;
 	bnxt_set_edev_info(edev, bp);

drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h

@@ -15,6 +15,8 @@
 #define BNXT_MIN_ROCE_CP_RINGS	2
 #define BNXT_MIN_ROCE_STAT_CTXS	1
+#define BNXT_MAX_ROCE_MSIX	9
+#define BNXT_MAX_VF_ROCE_MSIX	2
 
 struct hwrm_async_event_cmpl;
 struct bnxt;
@@ -51,6 +53,7 @@ struct bnxt_ulp {
 struct bnxt_en_dev {
 	struct net_device *net;
 	struct pci_dev *pdev;
+	struct bnxt_msix_entry msix_entries[BNXT_MAX_ROCE_MSIX];
 	u32 flags;
 	#define BNXT_EN_FLAG_ROCEV1_CAP		0x1
 	#define BNXT_EN_FLAG_ROCEV2_CAP		0x2
@@ -101,9 +104,6 @@ void bnxt_rdma_aux_device_init(struct bnxt *bp);
 int bnxt_register_dev(struct bnxt_en_dev *edev, struct bnxt_ulp_ops *ulp_ops,
 		      void *handle);
 void bnxt_unregister_dev(struct bnxt_en_dev *edev);
-int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, struct bnxt_msix_entry *ent,
-		       int num_msix);
-void bnxt_free_msix_vecs(struct bnxt_en_dev *edev);
 int bnxt_send_msg(struct bnxt_en_dev *edev, struct bnxt_fw_msg *fw_msg);
 int bnxt_register_async_events(struct bnxt_en_dev *edev,
 			       unsigned long *events_bmap, u16 max_id);
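
For reference, the vector-count policy that replaces the old
bnxt_get_ulp_msix_num() body reduces to a simple clamp. A standalone
restatement (the helper name and parameters are illustrative; the
constants are the ones added to bnxt_ulp.h above):

/* Illustrative restatement of the new bnxt_get_ulp_msix_num() policy:
 * a PF may use up to BNXT_MAX_ROCE_MSIX (9) vectors and a VF up to
 * BNXT_MAX_VF_ROCE_MSIX (2), further capped by the number of online
 * CPUs; a device without RoCE capability gets none.
 */
static u32 bnxt_roce_msix_count(bool is_vf, bool has_roce_cap, u32 online_cpus)
{
	u32 max = is_vf ? BNXT_MAX_VF_ROCE_MSIX : BNXT_MAX_ROCE_MSIX;

	return has_roce_cap ? min(max, online_cpus) : 0;
}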