RDMA/bnxt_re: Share a page to expose per CQ info with userspace

Gen P7 adapters need to share the toggle bit information received in
the kernel driver with user space. User space needs this info during
the request-notify callback to arm the CQ.

The user space application can get this page using the UAPI routines.
The library will mmap this page and read the toggle bits to be used in
the next ARM doorbell.
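
As a rough illustration of the user-space side (a minimal sketch, not
the actual rdma-core provider code; the command fd, the mmap key
returned by the GET_TOGGLE_MEM call, and the function names are
placeholders), the library maps the per-CQ page once and reads the
toggle value each time it arms the CQ:

#include <stdint.h>
#include <sys/types.h>
#include <sys/mman.h>

/* Hypothetical sketch: 'cmd_fd' is the uverbs command fd and 'key' is
 * the mmap offset returned by the BNXT_RE_METHOD_GET_TOGGLE_MEM ioctl
 * for a given CQ; the real provider code differs.
 */
static volatile uint32_t *map_cq_toggle_page(int cmd_fd, off_t key)
{
	void *page = mmap(NULL, 4096, PROT_READ, MAP_SHARED, cmd_fd, key);

	return page == MAP_FAILED ? NULL : (volatile uint32_t *)page;
}

/* Read the toggle value the kernel published from its completion
 * handler; the library folds this into the next CQ ARM doorbell.
 */
static uint32_t cq_current_toggle(volatile uint32_t *toggle_page)
{
	return *toggle_page;
}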

Use a hash list to map the CQ structure from the CQ ID. The CQ
structure is retrieved from the hash list when the library calls the
UAPI routine to get the toggle page mapping. Currently a full page is
mapped per CQ; this can be optimized later so that multiple CQs from
the same application share the same page at different offsets.
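
The CQ-id lookup relies on the generic kernel hashtable API. A
condensed illustration of the pattern this patch uses (structure and
function names shortened here for brevity, not the driver's actual
symbols):

#include <linux/hashtable.h>
#include <linux/types.h>

#define DEMO_CQ_HASH_BITS 16

struct demo_cq {
	u32 cq_id;
	struct hlist_node hash_entry;
};

static DEFINE_HASHTABLE(demo_cq_hash, DEMO_CQ_HASH_BITS);

/* Key the CQ by its hardware CQ id at create time... */
static void demo_cq_track(struct demo_cq *cq)
{
	hash_add(demo_cq_hash, &cq->hash_entry, cq->cq_id);
}

/* ...so the toggle-memory UAPI handler can find it again later. */
static struct demo_cq *demo_cq_lookup(u32 cq_id)
{
	struct demo_cq *cq;

	hash_for_each_possible(demo_cq_hash, cq, hash_entry, cq_id) {
		if (cq->cq_id == cq_id)
			return cq;
	}
	return NULL;
}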

Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Link: https://lore.kernel.org/r/1702535484-26844-3-git-send-email-selvin.xavier@broadcom.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Selvin Xavier, 2023-12-13 22:31:24 -08:00; committed by Leon Romanovsky
commit e275919d96 (parent 9b0a7a2cb8)
6 changed files with 78 additions and 7 deletions

@@ -41,6 +41,7 @@
#define __BNXT_RE_H__
#include <rdma/uverbs_ioctl.h>
#include "hw_counters.h"
#include <linux/hashtable.h>
#define ROCE_DRV_MODULE_NAME "bnxt_re"
#define BNXT_RE_DESC "Broadcom NetXtreme-C/E RoCE Driver"
@@ -135,6 +136,7 @@ struct bnxt_re_pacing {
#define BNXT_RE_DB_FIFO_ROOM_SHIFT 15
#define BNXT_RE_GRC_FIFO_REG_BASE 0x2000
#define MAX_CQ_HASH_BITS (16)
struct bnxt_re_dev {
struct ib_device ibdev;
struct list_head list;
@@ -189,6 +191,7 @@ struct bnxt_re_dev {
struct bnxt_re_pacing pacing;
struct work_struct dbq_fifo_check_work;
struct delayed_work dbq_pacing_work;
DECLARE_HASHTABLE(cq_hash, MAX_CQ_HASH_BITS);
};
#define to_bnxt_re_dev(ptr, member) \

@@ -50,6 +50,7 @@
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>
#include <linux/hashtable.h>
#include "bnxt_ulp.h"
@@ -2910,14 +2911,20 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
/* Completion Queues */
int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
struct bnxt_re_cq *cq;
struct bnxt_qplib_chip_ctx *cctx;
struct bnxt_qplib_nq *nq;
struct bnxt_re_dev *rdev;
struct bnxt_re_cq *cq;
cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
rdev = cq->rdev;
nq = cq->qplib_cq.nq;
cctx = rdev->chip_ctx;
if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
free_page((unsigned long)cq->uctx_cq_page);
hash_del(&cq->hash_entry);
}
bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
ib_umem_release(cq->umem);
@@ -2935,10 +2942,11 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct bnxt_re_ucontext *uctx =
rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
int rc, entries;
int cqe = attr->cqe;
struct bnxt_qplib_chip_ctx *cctx;
struct bnxt_qplib_nq *nq = NULL;
int rc = -ENOMEM, entries;
unsigned int nq_alloc_cnt;
int cqe = attr->cqe;
u32 active_cqs;
if (attr->flags)
@@ -2951,6 +2959,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
}
cq->rdev = rdev;
cctx = rdev->chip_ctx;
cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
entries = bnxt_re_init_depth(cqe + 1, uctx);
@@ -3012,22 +3021,32 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
spin_lock_init(&cq->cq_lock);
if (udata) {
struct bnxt_re_cq_resp resp;
struct bnxt_re_cq_resp resp = {};
if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
hash_add(rdev->cq_hash, &cq->hash_entry, cq->qplib_cq.id);
/* Allocate a page */
cq->uctx_cq_page = (void *)get_zeroed_page(GFP_KERNEL);
if (!cq->uctx_cq_page)
goto c2fail;
resp.comp_mask |= BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT;
}
resp.cqid = cq->qplib_cq.id;
resp.tail = cq->qplib_cq.hwq.cons;
resp.phase = cq->qplib_cq.period;
resp.rsvd = 0;
rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
goto c2fail;
goto free_mem;
}
}
return 0;
free_mem:
free_page((unsigned long)cq->uctx_cq_page);
c2fail:
ib_umem_release(cq->umem);
fail:
@@ -4214,6 +4233,19 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
}
}
static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq_id)
{
struct bnxt_re_cq *cq = NULL, *tmp_cq;
hash_for_each_possible(rdev->cq_hash, tmp_cq, hash_entry, cq_id) {
if (tmp_cq->qplib_cq.id == cq_id) {
cq = tmp_cq;
break;
}
}
return cq;
}
/* Helper function to mmap the virtual memory from user app */
int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
{
@@ -4442,10 +4474,12 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bund
struct bnxt_re_ucontext *uctx;
struct ib_ucontext *ib_uctx;
struct bnxt_re_dev *rdev;
struct bnxt_re_cq *cq;
u64 mem_offset;
u64 addr = 0;
u32 length;
u32 offset;
u32 cq_id;
int err;
ib_uctx = ib_uverbs_get_ucontext(attrs);
@@ -4461,6 +4495,19 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bund
switch (res_type) {
case BNXT_RE_CQ_TOGGLE_MEM:
err = uverbs_copy_from(&cq_id, attrs, BNXT_RE_TOGGLE_MEM_RES_ID);
if (err)
return err;
cq = bnxt_re_search_for_cq(rdev, cq_id);
if (!cq)
return -EINVAL;
length = PAGE_SIZE;
addr = (u64)cq->uctx_cq_page;
mmap_flag = BNXT_RE_MMAP_TOGGLE_PAGE;
offset = 0;
break;
case BNXT_RE_SRQ_TOGGLE_MEM:
break;

@@ -108,6 +108,8 @@ struct bnxt_re_cq {
struct ib_umem *umem;
struct ib_umem *resize_umem;
int resize_cqe;
void *uctx_cq_page;
struct hlist_node hash_entry;
};
struct bnxt_re_mr {

@@ -54,6 +54,7 @@
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <linux/hashtable.h>
#include "bnxt_ulp.h"
#include "roce_hsi.h"
@@ -136,6 +137,8 @@ static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
if (bnxt_re_hwrm_qcaps(rdev))
dev_err(rdev_to_dev(rdev),
"Failed to query hwrm qcaps\n");
if (bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx))
cctx->modes.toggle_bits |= BNXT_QPLIB_CQ_TOGGLE_BIT;
}
static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
@@ -1206,9 +1209,13 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
{
struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq,
qplib_cq);
u32 *cq_ptr;
if (cq->ib_cq.comp_handler) {
/* Lock comp_handler? */
if (cq->uctx_cq_page) {
cq_ptr = (u32 *)cq->uctx_cq_page;
*cq_ptr = cq->qplib_cq.toggle;
}
(*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);
}
@@ -1730,6 +1737,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
*/
bnxt_re_vf_res_config(rdev);
}
hash_init(rdev->cq_hash);
return 0;
free_sctx:

@@ -55,6 +55,12 @@ struct bnxt_qplib_drv_modes {
u8 wqe_mode;
bool db_push;
bool dbr_pacing;
u32 toggle_bits;
};
enum bnxt_re_toggle_modes {
BNXT_QPLIB_CQ_TOGGLE_BIT = 0x1,
BNXT_QPLIB_SRQ_TOGGLE_BIT = 0x2,
};
struct bnxt_qplib_chip_ctx {

@@ -102,11 +102,16 @@ struct bnxt_re_cq_req {
__aligned_u64 cq_handle;
};
enum bnxt_re_cq_mask {
BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT = 0x1,
};
struct bnxt_re_cq_resp {
__u32 cqid;
__u32 tail;
__u32 phase;
__u32 rsvd;
__aligned_u64 comp_mask;
};
struct bnxt_re_resize_cq_req {