mirror of
https://github.com/torvalds/linux.git
synced 2024-11-22 04:02:20 +00:00
RDMA/mana_ib: Implement uapi to create and destroy RC QP
Implement user requests to create and destroy an RC QP. As the user does not have an FMR queue, it is skipped and the NO_FMR flag is used.

Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
Link: https://lore.kernel.org/r/1716366242-558-3-git-send-email-kotaranov@linux.microsoft.com
Reviewed-by: Zhu Yanjun <yanjun.zhu@linux.dev>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
This commit is contained in:
parent
53657a0419
commit
fdefb91849
@ -248,6 +248,10 @@ struct mana_rnic_destroy_cq_resp {
|
||||
struct gdma_resp_hdr hdr;
|
||||
}; /* HW Data */
|
||||
|
||||
/* Flags for the RNIC create-RC-QP request.
 * MANA_RC_FLAG_NO_FMR: the QP is created without an FMR send queue
 * (used for user-level RC QPs, which have no FMR queue).
 */
enum mana_rnic_create_rc_flags {
	MANA_RC_FLAG_NO_FMR = 2,
};
|
||||
|
||||
struct mana_rnic_create_qp_req {
|
||||
struct gdma_req_hdr hdr;
|
||||
mana_handle_t adapter;
|
||||
|
@ -398,6 +398,78 @@ err_free_vport:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int mana_ib_create_rc_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
|
||||
struct ib_qp_init_attr *attr, struct ib_udata *udata)
|
||||
{
|
||||
struct mana_ib_dev *mdev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
|
||||
struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
|
||||
struct mana_ib_create_rc_qp_resp resp = {};
|
||||
struct mana_ib_ucontext *mana_ucontext;
|
||||
struct mana_ib_create_rc_qp ucmd = {};
|
||||
int i, err, j;
|
||||
u64 flags = 0;
|
||||
u32 doorbell;
|
||||
|
||||
if (!udata || udata->inlen < sizeof(ucmd))
|
||||
return -EINVAL;
|
||||
|
||||
mana_ucontext = rdma_udata_to_drv_context(udata, struct mana_ib_ucontext, ibucontext);
|
||||
doorbell = mana_ucontext->doorbell;
|
||||
flags = MANA_RC_FLAG_NO_FMR;
|
||||
err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
|
||||
if (err) {
|
||||
ibdev_dbg(&mdev->ib_dev, "Failed to copy from udata, %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
for (i = 0, j = 0; i < MANA_RC_QUEUE_TYPE_MAX; ++i) {
|
||||
/* skip FMR for user-level RC QPs */
|
||||
if (i == MANA_RC_SEND_QUEUE_FMR) {
|
||||
qp->rc_qp.queues[i].id = INVALID_QUEUE_ID;
|
||||
qp->rc_qp.queues[i].gdma_region = GDMA_INVALID_DMA_REGION;
|
||||
continue;
|
||||
}
|
||||
err = mana_ib_create_queue(mdev, ucmd.queue_buf[j], ucmd.queue_size[j],
|
||||
&qp->rc_qp.queues[i]);
|
||||
if (err) {
|
||||
ibdev_err(&mdev->ib_dev, "Failed to create queue %d, err %d\n", i, err);
|
||||
goto destroy_queues;
|
||||
}
|
||||
j++;
|
||||
}
|
||||
|
||||
err = mana_ib_gd_create_rc_qp(mdev, qp, attr, doorbell, flags);
|
||||
if (err) {
|
||||
ibdev_err(&mdev->ib_dev, "Failed to create rc qp %d\n", err);
|
||||
goto destroy_queues;
|
||||
}
|
||||
qp->ibqp.qp_num = qp->rc_qp.queues[MANA_RC_RECV_QUEUE_RESPONDER].id;
|
||||
qp->port = attr->port_num;
|
||||
|
||||
if (udata) {
|
||||
for (i = 0, j = 0; i < MANA_RC_QUEUE_TYPE_MAX; ++i) {
|
||||
if (i == MANA_RC_SEND_QUEUE_FMR)
|
||||
continue;
|
||||
resp.queue_id[j] = qp->rc_qp.queues[i].id;
|
||||
j++;
|
||||
}
|
||||
err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
|
||||
if (err) {
|
||||
ibdev_dbg(&mdev->ib_dev, "Failed to copy to udata, %d\n", err);
|
||||
goto destroy_qp;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
destroy_qp:
|
||||
mana_ib_gd_destroy_rc_qp(mdev, qp);
|
||||
destroy_queues:
|
||||
while (i-- > 0)
|
||||
mana_ib_destroy_queue(mdev, &qp->rc_qp.queues[i]);
|
||||
return err;
|
||||
}
|
||||
|
||||
int mana_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
|
||||
struct ib_udata *udata)
|
||||
{
|
||||
@ -409,8 +481,9 @@ int mana_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
|
||||
udata);
|
||||
|
||||
return mana_ib_create_qp_raw(ibqp, ibqp->pd, attr, udata);
|
||||
case IB_QPT_RC:
|
||||
return mana_ib_create_rc_qp(ibqp, ibqp->pd, attr, udata);
|
||||
default:
|
||||
/* Creating QP other than IB_QPT_RAW_PACKET is not supported */
|
||||
ibdev_dbg(ibqp->device, "Creating QP type %u not supported\n",
|
||||
attr->qp_type);
|
||||
}
|
||||
@ -473,6 +546,22 @@ static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mana_ib_destroy_rc_qp(struct mana_ib_qp *qp, struct ib_udata *udata)
|
||||
{
|
||||
struct mana_ib_dev *mdev =
|
||||
container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
|
||||
int i;
|
||||
|
||||
/* Ignore return code as there is not much we can do about it.
|
||||
* The error message is printed inside.
|
||||
*/
|
||||
mana_ib_gd_destroy_rc_qp(mdev, qp);
|
||||
for (i = 0; i < MANA_RC_QUEUE_TYPE_MAX; ++i)
|
||||
mana_ib_destroy_queue(mdev, &qp->rc_qp.queues[i]);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
|
||||
{
|
||||
struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
|
||||
@ -484,7 +573,8 @@ int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
|
||||
udata);
|
||||
|
||||
return mana_ib_destroy_qp_raw(qp, udata);
|
||||
|
||||
case IB_QPT_RC:
|
||||
return mana_ib_destroy_rc_qp(qp, udata);
|
||||
default:
|
||||
ibdev_dbg(ibqp->device, "Unexpected QP type %u\n",
|
||||
ibqp->qp_type);
|
||||
|
@ -45,6 +45,15 @@ struct mana_ib_create_qp_resp {
|
||||
__u32 reserved;
|
||||
};
|
||||
|
||||
/* uverbs request for RC QP creation: one user buffer address and size
 * per user-visible RC queue, in HW queue-slot order with the FMR slot
 * omitted (presumably 4 == MANA_RC_QUEUE_TYPE_MAX - 1 — confirm against
 * the kernel-side enum).
 */
struct mana_ib_create_rc_qp {
	__aligned_u64 queue_buf[4];
	__u32 queue_size[4];
};
|
||||
|
||||
/* uverbs response for RC QP creation: HW queue ids of the created
 * queues, in the same order as queue_buf[] above (FMR slot omitted).
 */
struct mana_ib_create_rc_qp_resp {
	__u32 queue_id[4];
};
|
||||
|
||||
struct mana_ib_create_wq {
|
||||
__aligned_u64 wq_buf_addr;
|
||||
__u32 wq_buf_size;
|
||||
|
Loading…
Reference in New Issue
Block a user