RDMA: Globally allocate and release QP memory
Convert QP object to follow IB/core general allocation scheme. That
change allows us to make sure that restrack properly kref the memory.

Link: https://lore.kernel.org/r/48e767124758aeecc433360ddd85eaa6325b34d9.1627040189.git.leonro@nvidia.com
Reviewed-by: Gal Pressman <galpress@amazon.com> #efa
Tested-by: Gal Pressman <galpress@amazon.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com> #rdma and core
Tested-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Tested-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
parent 44da3730e0
commit 514aee660d
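Before reading the diff, it helps to see the driver-facing pattern the commit converges on. The following minimal sketch is illustrative only — the "foo" names are hypothetical and not part of the patch. The core now allocates the driver's QP container with rdma_zalloc_drv_obj_numa() using the size declared via INIT_RDMA_OBJ_SIZE(), pre-initializes the embedded ib_qp (device, pd, and so on), and calls the driver's create_qp, which returns an int instead of an ib_qp pointer; on error the core, not the driver, frees the memory and unwinds the restrack entry.

	/* Hypothetical driver "foo", shown only to illustrate the converted
	 * allocation scheme that every driver in this patch adopts. */
	struct foo_qp {
		struct ib_qp ibqp;	/* embedded IB object, kept first */
		/* driver-private QP state follows */
	};

	static int foo_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
				 struct ib_udata *udata)
	{
		struct foo_qp *qp = container_of(ibqp, struct foo_qp, ibqp);

		/* ibqp->device and ibqp->pd are already set by _ib_create_qp().
		 * Initialize hardware state here and return 0, or a negative
		 * errno - the core then kfree()s the object and drops the
		 * restrack reference, so the driver must not free it. */
		return 0;
	}

	static const struct ib_device_ops foo_dev_ops = {
		.create_qp = foo_create_qp,
		INIT_RDMA_OBJ_SIZE(ib_qp, foo_qp, ibqp),
	};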
@@ -322,13 +322,14 @@ _ib_create_qp(struct ib_device *dev, struct ib_pd *pd,
 		  struct ib_uqp_object *uobj, const char *caller)
 {
 	struct ib_qp *qp;
+	int ret;
 
 	if (!dev->ops.create_qp)
 		return ERR_PTR(-EOPNOTSUPP);
 
-	qp = dev->ops.create_qp(pd, attr, udata);
-	if (IS_ERR(qp))
-		return qp;
+	qp = rdma_zalloc_drv_obj_numa(dev, ib_qp);
+	if (!qp)
+		return ERR_PTR(-ENOMEM);
 
 	qp->device = dev;
 	qp->pd = pd;
@@ -337,14 +338,10 @@ _ib_create_qp(struct ib_device *dev, struct ib_pd *pd,
 
 	qp->qp_type = attr->qp_type;
 	qp->rwq_ind_tbl = attr->rwq_ind_tbl;
-	qp->send_cq = attr->send_cq;
-	qp->recv_cq = attr->recv_cq;
 	qp->srq = attr->srq;
-	qp->rwq_ind_tbl = attr->rwq_ind_tbl;
 	qp->event_handler = attr->event_handler;
-	qp->port = attr->port_num;
 
 	atomic_set(&qp->usecnt, 0);
 	spin_lock_init(&qp->mr_lock);
 	INIT_LIST_HEAD(&qp->rdma_mrs);
 	INIT_LIST_HEAD(&qp->sig_mrs);
@@ -352,8 +349,25 @@ _ib_create_qp(struct ib_device *dev, struct ib_pd *pd,
 	rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP);
 	WARN_ONCE(!udata && !caller, "Missing kernel QP owner");
 	rdma_restrack_set_name(&qp->res, udata ? NULL : caller);
+	ret = dev->ops.create_qp(qp, attr, udata);
+	if (ret)
+		goto err_create;
+
+	/*
+	 * TODO: The mlx4 internally overwrites send_cq and recv_cq.
+	 * Unfortunately, it is not an easy task to fix that driver.
+	 */
+	qp->send_cq = attr->send_cq;
+	qp->recv_cq = attr->recv_cq;
+
 	rdma_restrack_add(&qp->res);
 	return qp;
+
+err_create:
+	rdma_restrack_put(&qp->res);
+	kfree(qp);
+	return ERR_PTR(ret);
+
 }
 
 struct rdma_dev_addr;

@@ -2654,6 +2654,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
 	SET_DEVICE_OP(dev_ops, get_hw_stats);
 	SET_DEVICE_OP(dev_ops, get_link_layer);
 	SET_DEVICE_OP(dev_ops, get_netdev);
+	SET_DEVICE_OP(dev_ops, get_numa_node);
 	SET_DEVICE_OP(dev_ops, get_port_immutable);
 	SET_DEVICE_OP(dev_ops, get_vector_affinity);
 	SET_DEVICE_OP(dev_ops, get_vf_config);
@@ -2710,6 +2711,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
 	SET_OBJ_SIZE(dev_ops, ib_cq);
 	SET_OBJ_SIZE(dev_ops, ib_mw);
 	SET_OBJ_SIZE(dev_ops, ib_pd);
+	SET_OBJ_SIZE(dev_ops, ib_qp);
 	SET_OBJ_SIZE(dev_ops, ib_rwq_ind_table);
 	SET_OBJ_SIZE(dev_ops, ib_srq);
 	SET_OBJ_SIZE(dev_ops, ib_ucontext);

@@ -343,7 +343,7 @@ void rdma_restrack_del(struct rdma_restrack_entry *res)
 	rt = &dev->res[res->type];
 
 	old = xa_erase(&rt->xa, res->id);
-	if (res->type == RDMA_RESTRACK_MR || res->type == RDMA_RESTRACK_QP)
+	if (res->type == RDMA_RESTRACK_MR)
 		return;
 	WARN_ON(old != res);
 
@@ -1963,30 +1963,32 @@ int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
 		rdma_rw_cleanup_mrs(qp);
 
 	rdma_counter_unbind_qp(qp, true);
-	rdma_restrack_del(&qp->res);
 	ret = qp->device->ops.destroy_qp(qp, udata);
-	if (!ret) {
-		if (alt_path_sgid_attr)
-			rdma_put_gid_attr(alt_path_sgid_attr);
-		if (av_sgid_attr)
-			rdma_put_gid_attr(av_sgid_attr);
-		if (pd)
-			atomic_dec(&pd->usecnt);
-		if (scq)
-			atomic_dec(&scq->usecnt);
-		if (rcq)
-			atomic_dec(&rcq->usecnt);
-		if (srq)
-			atomic_dec(&srq->usecnt);
-		if (ind_tbl)
-			atomic_dec(&ind_tbl->usecnt);
-		if (sec)
-			ib_destroy_qp_security_end(sec);
-	} else {
+	if (ret) {
 		if (sec)
 			ib_destroy_qp_security_abort(sec);
+		return ret;
 	}
 
+	if (alt_path_sgid_attr)
+		rdma_put_gid_attr(alt_path_sgid_attr);
+	if (av_sgid_attr)
+		rdma_put_gid_attr(av_sgid_attr);
+	if (pd)
+		atomic_dec(&pd->usecnt);
+	if (scq)
+		atomic_dec(&scq->usecnt);
+	if (rcq)
+		atomic_dec(&rcq->usecnt);
+	if (srq)
+		atomic_dec(&srq->usecnt);
+	if (ind_tbl)
+		atomic_dec(&ind_tbl->usecnt);
+	if (sec)
+		ib_destroy_qp_security_end(sec);
+
+	rdma_restrack_del(&qp->res);
+	kfree(qp);
 	return ret;
 }
 EXPORT_SYMBOL(ib_destroy_qp_user);
@@ -815,7 +815,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
 	if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
 		rc = bnxt_re_destroy_gsi_sqp(qp);
 		if (rc)
-			goto sh_fail;
+			return rc;
 	}
 
 	mutex_lock(&rdev->qp_lock);
@@ -826,10 +826,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
 	ib_umem_release(qp->rumem);
 	ib_umem_release(qp->sumem);
 
-	kfree(qp);
 	return 0;
-sh_fail:
-	return rc;
 }
 
 static u8 __from_ib_qp_type(enum ib_qp_type type)
@@ -1402,27 +1399,22 @@ static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
 	return rc;
 }
 
-struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
-				struct ib_qp_init_attr *qp_init_attr,
-				struct ib_udata *udata)
+int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
+		      struct ib_udata *udata)
 {
+	struct ib_pd *ib_pd = ib_qp->pd;
 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
 	struct bnxt_re_dev *rdev = pd->rdev;
 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
-	struct bnxt_re_qp *qp;
+	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
 	int rc;
 
 	rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
 	if (!rc) {
 		rc = -EINVAL;
-		goto exit;
+		goto fail;
 	}
 
-	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
-	if (!qp) {
-		rc = -ENOMEM;
-		goto exit;
-	}
 	qp->rdev = rdev;
 	rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
 	if (rc)
@@ -1465,16 +1457,14 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
 	mutex_unlock(&rdev->qp_lock);
 	atomic_inc(&rdev->qp_count);
 
-	return &qp->ib_qp;
+	return 0;
 qp_destroy:
 	bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
free_umem:
 	ib_umem_release(qp->rumem);
 	ib_umem_release(qp->sumem);
fail:
-	kfree(qp);
-exit:
-	return ERR_PTR(rc);
+	return rc;
 }
 
 static u8 __from_ib_qp_state(enum ib_qp_state state)

@@ -78,9 +78,9 @@ struct bnxt_re_srq {
 };
 
 struct bnxt_re_qp {
+	struct ib_qp ib_qp;
 	struct list_head list;
 	struct bnxt_re_dev *rdev;
-	struct ib_qp ib_qp;
 	spinlock_t sq_lock; /* protect sq */
 	spinlock_t rq_lock; /* protect rq */
 	struct bnxt_qplib_qp qplib_qp;
@@ -179,9 +179,8 @@ int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
 int bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
 int bnxt_re_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *recv_wr,
 			  const struct ib_recv_wr **bad_recv_wr);
-struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd,
-				struct ib_qp_init_attr *qp_init_attr,
-				struct ib_udata *udata);
+int bnxt_re_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
+		      struct ib_udata *udata);
 int bnxt_re_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
 		      int qp_attr_mask, struct ib_udata *udata);
 int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,

@@ -709,6 +709,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
 	INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
 	INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq),
 	INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
+	INIT_RDMA_OBJ_SIZE(ib_qp, bnxt_re_qp, ib_qp),
 	INIT_RDMA_OBJ_SIZE(ib_srq, bnxt_re_srq, ib_srq),
 	INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
 };
@@ -989,9 +989,8 @@ int c4iw_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata);
 int c4iw_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *attrs,
 		    struct ib_udata *udata);
 int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata);
-struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
-			     struct ib_qp_init_attr *attrs,
-			     struct ib_udata *udata);
+int c4iw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
+		   struct ib_udata *udata);
 int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		      int attr_mask, struct ib_udata *udata);
 int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,

@@ -499,6 +499,7 @@ static const struct ib_device_ops c4iw_dev_ops = {
 	INIT_RDMA_OBJ_SIZE(ib_cq, c4iw_cq, ibcq),
 	INIT_RDMA_OBJ_SIZE(ib_mw, c4iw_mw, ibmw),
 	INIT_RDMA_OBJ_SIZE(ib_pd, c4iw_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_qp, c4iw_qp, ibqp),
 	INIT_RDMA_OBJ_SIZE(ib_srq, c4iw_srq, ibsrq),
 	INIT_RDMA_OBJ_SIZE(ib_ucontext, c4iw_ucontext, ibucontext),
 };

@@ -2103,16 +2103,15 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
 		       ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);
 
 	c4iw_put_wr_wait(qhp->wr_waitp);
-
-	kfree(qhp);
 	return 0;
 }
 
-struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
-			     struct ib_udata *udata)
+int c4iw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
+		   struct ib_udata *udata)
 {
+	struct ib_pd *pd = qp->pd;
 	struct c4iw_dev *rhp;
-	struct c4iw_qp *qhp;
+	struct c4iw_qp *qhp = to_c4iw_qp(qp);
 	struct c4iw_pd *php;
 	struct c4iw_cq *schp;
 	struct c4iw_cq *rchp;
@@ -2124,44 +2123,36 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
 	struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;
 
 	pr_debug("ib_pd %p\n", pd);
 
 	if (attrs->qp_type != IB_QPT_RC || attrs->create_flags)
-		return ERR_PTR(-EOPNOTSUPP);
+		return -EOPNOTSUPP;
 
 	php = to_c4iw_pd(pd);
 	rhp = php->rhp;
 	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
 	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
 	if (!schp || !rchp)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	if (!attrs->srq) {
 		if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
-			return ERR_PTR(-E2BIG);
+			return -E2BIG;
 		rqsize = attrs->cap.max_recv_wr + 1;
 		if (rqsize < 8)
 			rqsize = 8;
 	}
 
 	if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
-		return ERR_PTR(-E2BIG);
+		return -E2BIG;
 	sqsize = attrs->cap.max_send_wr + 1;
 	if (sqsize < 8)
 		sqsize = 8;
 
-	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
-	if (!qhp)
-		return ERR_PTR(-ENOMEM);
-
 	qhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
-	if (!qhp->wr_waitp) {
-		ret = -ENOMEM;
-		goto err_free_qhp;
-	}
+	if (!qhp->wr_waitp)
+		return -ENOMEM;
 
 	qhp->wq.sq.size = sqsize;
 	qhp->wq.sq.memsize =
@@ -2339,7 +2330,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 		 qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
 		 attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
 		 qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
-	return &qhp->ibqp;
+	return 0;
err_free_ma_sync_key:
 	kfree(ma_sync_key_mm);
err_free_rq_db_key:
@@ -2359,9 +2350,7 @@ err_destroy_qp:
 		       ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq);
err_free_wr_wait:
 	c4iw_put_wr_wait(qhp->wr_waitp);
-err_free_qhp:
-	kfree(qhp);
-	return ERR_PTR(ret);
+	return ret;
 }
 
 int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
@@ -132,9 +132,8 @@ int efa_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
 int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
 int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
 int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
-struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
-			    struct ib_qp_init_attr *init_attr,
-			    struct ib_udata *udata);
+int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
+		  struct ib_udata *udata);
 int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
 int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		  struct ib_udata *udata);

@@ -271,6 +271,7 @@ static const struct ib_device_ops efa_dev_ops = {
 	INIT_RDMA_OBJ_SIZE(ib_ah, efa_ah, ibah),
 	INIT_RDMA_OBJ_SIZE(ib_cq, efa_cq, ibcq),
 	INIT_RDMA_OBJ_SIZE(ib_pd, efa_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_qp, efa_qp, ibqp),
 	INIT_RDMA_OBJ_SIZE(ib_ucontext, efa_ucontext, ibucontext),
 };
 

@@ -450,7 +450,6 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 				qp->rq_size, DMA_TO_DEVICE);
 	}
 
-	kfree(qp);
 	return 0;
 }
 
@@ -609,17 +608,16 @@ static int efa_qp_validate_attr(struct efa_dev *dev,
 	return 0;
 }
 
-struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
-			    struct ib_qp_init_attr *init_attr,
-			    struct ib_udata *udata)
+int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
+		  struct ib_udata *udata)
 {
 	struct efa_com_create_qp_params create_qp_params = {};
 	struct efa_com_create_qp_result create_qp_resp;
-	struct efa_dev *dev = to_edev(ibpd->device);
+	struct efa_dev *dev = to_edev(ibqp->device);
 	struct efa_ibv_create_qp_resp resp = {};
 	struct efa_ibv_create_qp cmd = {};
+	struct efa_qp *qp = to_eqp(ibqp);
 	struct efa_ucontext *ucontext;
-	struct efa_qp *qp;
 	int err;
 
 	ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
@@ -664,14 +662,8 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
 		goto err_out;
 	}
 
-	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
-	if (!qp) {
-		err = -ENOMEM;
-		goto err_out;
-	}
-
 	create_qp_params.uarn = ucontext->uarn;
-	create_qp_params.pd = to_epd(ibpd)->pdn;
+	create_qp_params.pd = to_epd(ibqp->pd)->pdn;
 
 	if (init_attr->qp_type == IB_QPT_UD) {
 		create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD;
@@ -682,7 +674,7 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
 			  "Unsupported qp type %d driver qp type %d\n",
 			  init_attr->qp_type, cmd.driver_qp_type);
 		err = -EOPNOTSUPP;
-		goto err_free_qp;
+		goto err_out;
 	}
 
 	ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n",
@@ -700,7 +692,7 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
 					    qp->rq_size, DMA_TO_DEVICE);
 		if (!qp->rq_cpu_addr) {
 			err = -ENOMEM;
-			goto err_free_qp;
+			goto err_out;
 		}
 
 		ibdev_dbg(&dev->ibdev,
@@ -746,7 +738,7 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
 
 	ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num);
 
-	return &qp->ibqp;
+	return 0;
 
err_remove_mmap_entries:
 	efa_qp_user_mmap_entries_remove(qp);
@@ -756,11 +748,9 @@ err_free_mapped:
 	if (qp->rq_size)
 		efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
 				qp->rq_size, DMA_TO_DEVICE);
-err_free_qp:
-	kfree(qp);
err_out:
 	atomic64_inc(&dev->stats.create_qp_err);
-	return ERR_PTR(err);
+	return err;
 }
 
 static const struct {
@@ -1216,9 +1216,8 @@ int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
 int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
 int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
 
-struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
-				 struct ib_qp_init_attr *init_attr,
-				 struct ib_udata *udata);
+int hns_roce_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *init_attr,
+		       struct ib_udata *udata);
 int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		       int attr_mask, struct ib_udata *udata);
 void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);

@@ -454,6 +454,7 @@ static const struct ib_device_ops hns_roce_dev_ops = {
 	INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
 	INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq),
 	INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_qp, hns_roce_qp, ibqp),
 	INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
 };
 

@@ -959,8 +959,6 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	int ret;
 
-	hr_qp->ibqp.qp_type = init_attr->qp_type;
-
 	if (init_attr->cap.max_inline_data > hr_dev->caps.max_sq_inline)
 		init_attr->cap.max_inline_data = hr_dev->caps.max_sq_inline;
 
@@ -1121,8 +1119,6 @@ void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 	free_qp_buf(hr_dev, hr_qp);
 	free_kernel_wrid(hr_qp);
 	free_qp_db(hr_dev, hr_qp, udata);
-
-	kfree(hr_qp);
 }
 
 static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
@@ -1154,22 +1150,18 @@ out:
 	return -EOPNOTSUPP;
 }
 
-struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
-				 struct ib_qp_init_attr *init_attr,
-				 struct ib_udata *udata)
+int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
+		       struct ib_udata *udata)
 {
-	struct ib_device *ibdev = pd ? pd->device : init_attr->xrcd->device;
+	struct ib_device *ibdev = qp->device;
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
-	struct hns_roce_qp *hr_qp;
+	struct hns_roce_qp *hr_qp = to_hr_qp(qp);
+	struct ib_pd *pd = qp->pd;
 	int ret;
 
 	ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata);
 	if (ret)
-		return ERR_PTR(ret);
-
-	hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
-	if (!hr_qp)
-		return ERR_PTR(-ENOMEM);
+		return ret;
 
 	if (init_attr->qp_type == IB_QPT_XRC_TGT)
 		hr_qp->xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn;
@@ -1180,15 +1172,11 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
 	}
 
 	ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp);
-	if (ret) {
+	if (ret)
 		ibdev_err(ibdev, "Create QP type 0x%x failed(%d)\n",
 			  init_attr->qp_type, ret);
 
-		kfree(hr_qp);
-		return ERR_PTR(ret);
-	}
-
-	return &hr_qp->ibqp;
+	return ret;
 }
 
 int to_hr_qp_type(int qp_type)
@@ -1141,10 +1141,7 @@ void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
 			  iwqp->kqp.dma_mem.va, iwqp->kqp.dma_mem.pa);
 	iwqp->kqp.dma_mem.va = NULL;
 	kfree(iwqp->kqp.sq_wrid_mem);
-	iwqp->kqp.sq_wrid_mem = NULL;
 	kfree(iwqp->kqp.rq_wrid_mem);
-	iwqp->kqp.rq_wrid_mem = NULL;
-	kfree(iwqp);
 }
 
 /**

@@ -792,18 +792,19 @@ static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
 
 /**
  * irdma_create_qp - create qp
- * @ibpd: ptr of pd
+ * @ibqp: ptr of qp
  * @init_attr: attributes for qp
  * @udata: user data for create qp
  */
-static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
-				     struct ib_qp_init_attr *init_attr,
-				     struct ib_udata *udata)
+static int irdma_create_qp(struct ib_qp *ibqp,
+			   struct ib_qp_init_attr *init_attr,
+			   struct ib_udata *udata)
 {
+	struct ib_pd *ibpd = ibqp->pd;
 	struct irdma_pd *iwpd = to_iwpd(ibpd);
 	struct irdma_device *iwdev = to_iwdev(ibpd->device);
 	struct irdma_pci_f *rf = iwdev->rf;
-	struct irdma_qp *iwqp;
+	struct irdma_qp *iwqp = to_iwqp(ibqp);
 	struct irdma_create_qp_req req;
 	struct irdma_create_qp_resp uresp = {};
 	u32 qp_num = 0;
@@ -820,7 +821,7 @@ static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
 
 	err_code = irdma_validate_qp_attrs(init_attr, iwdev);
 	if (err_code)
-		return ERR_PTR(err_code);
+		return err_code;
 
 	sq_size = init_attr->cap.max_send_wr;
 	rq_size = init_attr->cap.max_recv_wr;
@@ -833,10 +834,6 @@ static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
 	init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
 	init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
 
-	iwqp = kzalloc(sizeof(*iwqp), GFP_KERNEL);
-	if (!iwqp)
-		return ERR_PTR(-ENOMEM);
-
 	qp = &iwqp->sc_qp;
 	qp->qp_uk.back_qp = iwqp;
 	qp->qp_uk.lock = &iwqp->lock;
@@ -849,10 +846,8 @@ static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
 					 iwqp->q2_ctx_mem.size,
 					 &iwqp->q2_ctx_mem.pa,
 					 GFP_KERNEL);
-	if (!iwqp->q2_ctx_mem.va) {
-		err_code = -ENOMEM;
-		goto error;
-	}
+	if (!iwqp->q2_ctx_mem.va)
+		return -ENOMEM;
 
 	init_info.q2 = iwqp->q2_ctx_mem.va;
 	init_info.q2_pa = iwqp->q2_ctx_mem.pa;
@@ -1001,17 +996,16 @@ static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
 		if (err_code) {
 			ibdev_dbg(&iwdev->ibdev, "VERBS: copy_to_udata failed\n");
 			irdma_destroy_qp(&iwqp->ibqp, udata);
-			return ERR_PTR(err_code);
+			return err_code;
 		}
 	}
 
 	init_completion(&iwqp->free_qp);
-	return &iwqp->ibqp;
+	return 0;
 
error:
 	irdma_free_qp_rsrc(iwqp);
-
-	return ERR_PTR(err_code);
+	return err_code;
 }
 
 static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
@@ -4406,6 +4400,7 @@ static const struct ib_device_ops irdma_dev_ops = {
 	INIT_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah),
 	INIT_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq),
 	INIT_RDMA_OBJ_SIZE(ib_mw, irdma_mr, ibmw),
+	INIT_RDMA_OBJ_SIZE(ib_qp, irdma_qp, ibqp),
 };
 
 /**
@@ -2577,6 +2577,7 @@ static const struct ib_device_ops mlx4_ib_dev_ops = {
 	INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
 	INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq),
 	INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_qp, mlx4_ib_qp, ibqp),
 	INIT_RDMA_OBJ_SIZE(ib_srq, mlx4_ib_srq, ibsrq),
 	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext),
 };

@@ -792,9 +792,8 @@ void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
 int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 			  const struct ib_recv_wr **bad_wr);
 
-struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
-				struct ib_qp_init_attr *init_attr,
-				struct ib_udata *udata);
+int mlx4_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
+		      struct ib_udata *udata);
 int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
 void mlx4_ib_drain_sq(struct ib_qp *qp);
 void mlx4_ib_drain_rq(struct ib_qp *qp);

@@ -1578,24 +1578,19 @@ static int _mlx4_ib_create_qp(struct ib_pd *pd, struct mlx4_ib_qp *qp,
 	return 0;
 }
 
-struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
-				struct ib_qp_init_attr *init_attr,
-				struct ib_udata *udata) {
-	struct ib_device *device = pd ? pd->device : init_attr->xrcd->device;
+int mlx4_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
+		      struct ib_udata *udata)
+{
+	struct ib_device *device = ibqp->device;
 	struct mlx4_ib_dev *dev = to_mdev(device);
-	struct mlx4_ib_qp *qp;
+	struct mlx4_ib_qp *qp = to_mqp(ibqp);
+	struct ib_pd *pd = ibqp->pd;
 	int ret;
 
-	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
-	if (!qp)
-		return ERR_PTR(-ENOMEM);
-
 	mutex_init(&qp->mutex);
 	ret = _mlx4_ib_create_qp(pd, qp, init_attr, udata);
-	if (ret) {
-		kfree(qp);
-		return ERR_PTR(ret);
-	}
+	if (ret)
+		return ret;
 
 	if (init_attr->qp_type == IB_QPT_GSI &&
 	    !(init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI)) {
@@ -1618,7 +1613,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
 			init_attr->create_flags &= ~MLX4_IB_QP_CREATE_ROCE_V2_GSI;
 		}
 	}
-	return &qp->ibqp;
+	return 0;
 }
 
 static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
@@ -1646,8 +1641,6 @@ static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
 	}
 
 	kfree(mqp->sqp);
-	kfree(mqp);
-
 	return 0;
 }
 
@@ -193,8 +193,6 @@ int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp)
 
 	kfree(gsi->outstanding_wrs);
 	kfree(gsi->tx_qps);
-	kfree(mqp);
-
 	return 0;
 }
 

@@ -3805,6 +3805,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
 	INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs),
 	INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
 	INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_qp, mlx5_ib_qp, ibqp),
 	INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
 	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
 };

@@ -1219,9 +1219,8 @@ int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 			  const struct ib_recv_wr **bad_wr);
 int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
 void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
-struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
-				struct ib_qp_init_attr *init_attr,
-				struct ib_udata *udata);
+int mlx5_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
+		      struct ib_udata *udata);
 int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		      int attr_mask, struct ib_udata *udata);
 int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,

@@ -3114,7 +3114,6 @@ static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
 	}
 
 	kfree(mqp->dct.in);
-	kfree(mqp);
 	return 0;
 }
 
@@ -3152,25 +3151,23 @@ static int check_ucmd_data(struct mlx5_ib_dev *dev,
 	return ret ? 0 : -EINVAL;
 }
 
-struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
-				struct ib_udata *udata)
+int mlx5_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
+		      struct ib_udata *udata)
 {
 	struct mlx5_create_qp_params params = {};
-	struct mlx5_ib_dev *dev;
-	struct mlx5_ib_qp *qp;
+	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+	struct mlx5_ib_qp *qp = to_mqp(ibqp);
+	struct ib_pd *pd = ibqp->pd;
 	enum ib_qp_type type;
 	int err;
 
-	dev = pd ? to_mdev(pd->device) :
-		   to_mdev(to_mxrcd(attr->xrcd)->ibxrcd.device);
-
 	err = check_qp_type(dev, attr, &type);
 	if (err)
-		return ERR_PTR(err);
+		return err;
 
 	err = check_valid_flow(dev, pd, attr, udata);
 	if (err)
-		return ERR_PTR(err);
+		return err;
 
 	params.udata = udata;
 	params.uidx = MLX5_IB_DEFAULT_UIDX;
@@ -3180,49 +3177,43 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
 	if (udata) {
 		err = process_udata_size(dev, &params);
 		if (err)
-			return ERR_PTR(err);
+			return err;
 
 		err = check_ucmd_data(dev, &params);
 		if (err)
-			return ERR_PTR(err);
+			return err;
 
 		params.ucmd = kzalloc(params.ucmd_size, GFP_KERNEL);
 		if (!params.ucmd)
-			return ERR_PTR(-ENOMEM);
+			return -ENOMEM;
 
 		err = ib_copy_from_udata(params.ucmd, udata, params.inlen);
 		if (err)
			goto free_ucmd;
 	}
 
-	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
-	if (!qp) {
-		err = -ENOMEM;
-		goto free_ucmd;
-	}
-
 	mutex_init(&qp->mutex);
 	qp->type = type;
 	if (udata) {
 		err = process_vendor_flags(dev, qp, params.ucmd, attr);
 		if (err)
-			goto free_qp;
+			goto free_ucmd;
 
 		err = get_qp_uidx(qp, &params);
 		if (err)
-			goto free_qp;
+			goto free_ucmd;
 	}
 	err = process_create_flags(dev, qp, attr);
 	if (err)
-		goto free_qp;
+		goto free_ucmd;
 
 	err = check_qp_attr(dev, qp, attr);
 	if (err)
-		goto free_qp;
+		goto free_ucmd;
 
 	err = create_qp(dev, pd, qp, &params);
 	if (err)
-		goto free_qp;
+		goto free_ucmd;
 
 	kfree(params.ucmd);
 	params.ucmd = NULL;
@@ -3237,7 +3228,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
 	if (err)
 		goto destroy_qp;
 
-	return &qp->ibqp;
+	return 0;
 
destroy_qp:
 	switch (qp->type) {
@@ -3248,22 +3239,12 @@ destroy_qp:
 		mlx5_ib_destroy_gsi(qp);
 		break;
 	default:
-		/*
-		 * These lines below are temp solution till QP allocation
-		 * will be moved to be under IB/core responsiblity.
-		 */
-		qp->ibqp.send_cq = attr->send_cq;
-		qp->ibqp.recv_cq = attr->recv_cq;
-		qp->ibqp.pd = pd;
 		destroy_qp_common(dev, qp, udata);
 	}
 
-	qp = NULL;
-free_qp:
-	kfree(qp);
free_ucmd:
 	kfree(params.ucmd);
-	return ERR_PTR(err);
+	return err;
 }
 
 int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
@@ -3278,9 +3259,6 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
 		return mlx5_ib_destroy_dct(mqp);
 
 	destroy_qp_common(dev, mqp, udata);
-
-	kfree(mqp);
-
 	return 0;
 }
 
@@ -459,52 +459,45 @@ static int mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
 	return 0;
 }
 
-static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
-				     struct ib_qp_init_attr *init_attr,
-				     struct ib_udata *udata)
+static int mthca_create_qp(struct ib_qp *ibqp,
+			   struct ib_qp_init_attr *init_attr,
+			   struct ib_udata *udata)
 {
 	struct mthca_ucontext *context = rdma_udata_to_drv_context(
 		udata, struct mthca_ucontext, ibucontext);
 	struct mthca_create_qp ucmd;
-	struct mthca_qp *qp;
+	struct mthca_qp *qp = to_mqp(ibqp);
+	struct mthca_dev *dev = to_mdev(ibqp->device);
 	int err;
 
 	if (init_attr->create_flags)
-		return ERR_PTR(-EOPNOTSUPP);
+		return -EOPNOTSUPP;
 
 	switch (init_attr->qp_type) {
 	case IB_QPT_RC:
 	case IB_QPT_UC:
 	case IB_QPT_UD:
 	{
-		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
-		if (!qp)
-			return ERR_PTR(-ENOMEM);
-
 		if (udata) {
-			if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
-				kfree(qp);
-				return ERR_PTR(-EFAULT);
-			}
+			if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
+				return -EFAULT;
 
-			err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
+			err = mthca_map_user_db(dev, &context->uar,
 						context->db_tab,
-						ucmd.sq_db_index, ucmd.sq_db_page);
-			if (err) {
-				kfree(qp);
-				return ERR_PTR(err);
-			}
+						ucmd.sq_db_index,
+						ucmd.sq_db_page);
+			if (err)
+				return err;
 
-			err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
+			err = mthca_map_user_db(dev, &context->uar,
 						context->db_tab,
-						ucmd.rq_db_index, ucmd.rq_db_page);
+						ucmd.rq_db_index,
+						ucmd.rq_db_page);
 			if (err) {
-				mthca_unmap_user_db(to_mdev(pd->device),
-						    &context->uar,
+				mthca_unmap_user_db(dev, &context->uar,
 						    context->db_tab,
 						    ucmd.sq_db_index);
-				kfree(qp);
-				return ERR_PTR(err);
+				return err;
 			}
 
 			qp->mr.ibmr.lkey = ucmd.lkey;
@@ -512,20 +505,16 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
 			qp->rq.db_index = ucmd.rq_db_index;
 		}
 
-		err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
+		err = mthca_alloc_qp(dev, to_mpd(ibqp->pd),
 				     to_mcq(init_attr->send_cq),
 				     to_mcq(init_attr->recv_cq),
 				     init_attr->qp_type, init_attr->sq_sig_type,
 				     &init_attr->cap, qp, udata);
 
 		if (err && udata) {
-			mthca_unmap_user_db(to_mdev(pd->device),
-					    &context->uar,
-					    context->db_tab,
+			mthca_unmap_user_db(dev, &context->uar, context->db_tab,
 					    ucmd.sq_db_index);
-			mthca_unmap_user_db(to_mdev(pd->device),
-					    &context->uar,
-					    context->db_tab,
+			mthca_unmap_user_db(dev, &context->uar, context->db_tab,
 					    ucmd.rq_db_index);
 		}
 
@@ -535,34 +524,28 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
 	case IB_QPT_SMI:
 	case IB_QPT_GSI:
 	{
-		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
-		if (!qp)
-			return ERR_PTR(-ENOMEM);
 		qp->sqp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
-		if (!qp->sqp) {
-			kfree(qp);
-			return ERR_PTR(-ENOMEM);
-		}
+		if (!qp->sqp)
+			return -ENOMEM;
 
 		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;
 
-		err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
+		err = mthca_alloc_sqp(dev, to_mpd(ibqp->pd),
 				      to_mcq(init_attr->send_cq),
 				      to_mcq(init_attr->recv_cq),
 				      init_attr->sq_sig_type, &init_attr->cap,
-				      qp->ibqp.qp_num, init_attr->port_num,
-				      qp, udata);
+				      qp->ibqp.qp_num, init_attr->port_num, qp,
+				      udata);
 		break;
 	}
 	default:
 		/* Don't support raw QPs */
-		return ERR_PTR(-EOPNOTSUPP);
+		return -EOPNOTSUPP;
 	}
 
 	if (err) {
 		kfree(qp->sqp);
-		kfree(qp);
-		return ERR_PTR(err);
+		return err;
 	}
 
 	init_attr->cap.max_send_wr = qp->sq.max;
@@ -571,7 +554,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
 	init_attr->cap.max_recv_sge = qp->rq.max_gs;
 	init_attr->cap.max_inline_data = qp->max_inline_data;
 
-	return &qp->ibqp;
+	return 0;
 }
 
 static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
@@ -594,7 +577,6 @@ static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
 	}
 	mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
 	kfree(to_mqp(qp)->sqp);
-	kfree(to_mqp(qp));
 	return 0;
 }
 
@@ -1121,6 +1103,7 @@ static const struct ib_device_ops mthca_dev_ops = {
 	INIT_RDMA_OBJ_SIZE(ib_ah, mthca_ah, ibah),
 	INIT_RDMA_OBJ_SIZE(ib_cq, mthca_cq, ibcq),
 	INIT_RDMA_OBJ_SIZE(ib_pd, mthca_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_qp, mthca_qp, ibqp),
 	INIT_RDMA_OBJ_SIZE(ib_ucontext, mthca_ucontext, ibucontext),
 };
 
@@ -185,6 +185,7 @@ static const struct ib_device_ops ocrdma_dev_ops = {
 	INIT_RDMA_OBJ_SIZE(ib_ah, ocrdma_ah, ibah),
 	INIT_RDMA_OBJ_SIZE(ib_cq, ocrdma_cq, ibcq),
 	INIT_RDMA_OBJ_SIZE(ib_pd, ocrdma_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_qp, ocrdma_qp, ibqp),
 	INIT_RDMA_OBJ_SIZE(ib_ucontext, ocrdma_ucontext, ibucontext),
 };
 

@@ -1288,19 +1288,19 @@ static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
 	}
 }
 
-struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
-			       struct ib_qp_init_attr *attrs,
-			       struct ib_udata *udata)
+int ocrdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
+		     struct ib_udata *udata)
 {
 	int status;
+	struct ib_pd *ibpd = ibqp->pd;
 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
-	struct ocrdma_qp *qp;
-	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
+	struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
 	struct ocrdma_create_qp_ureq ureq;
 	u16 dpp_credit_lmt, dpp_offset;
 
 	if (attrs->create_flags)
-		return ERR_PTR(-EOPNOTSUPP);
+		return -EOPNOTSUPP;
 
 	status = ocrdma_check_qp_params(ibpd, dev, attrs, udata);
 	if (status)
@@ -1309,12 +1309,7 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
 	memset(&ureq, 0, sizeof(ureq));
 	if (udata) {
 		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
-			return ERR_PTR(-EFAULT);
-	}
-	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
-	if (!qp) {
-		status = -ENOMEM;
-		goto gen_err;
+			return -EFAULT;
 	}
 	ocrdma_set_qp_init_params(qp, pd, attrs);
 	if (udata == NULL)
@@ -1349,7 +1344,7 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
 	ocrdma_store_gsi_qp_cq(dev, attrs);
 	qp->ibqp.qp_num = qp->id;
 	mutex_unlock(&dev->dev_lock);
-	return &qp->ibqp;
+	return 0;
 
cpy_err:
 	ocrdma_del_qpn_map(dev, qp);
@@ -1359,10 +1354,9 @@ mbx_err:
 	mutex_unlock(&dev->dev_lock);
 	kfree(qp->wqe_wr_id_tbl);
 	kfree(qp->rqe_wr_id_tbl);
-	kfree(qp);
 	pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
-gen_err:
-	return ERR_PTR(status);
+	return status;
 }
 
 int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
@@ -1731,7 +1725,6 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 
 	kfree(qp->wqe_wr_id_tbl);
 	kfree(qp->rqe_wr_id_tbl);
-	kfree(qp);
 	return 0;
 }
 

@@ -75,9 +75,8 @@ int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
 int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
 
-struct ib_qp *ocrdma_create_qp(struct ib_pd *,
-			       struct ib_qp_init_attr *attrs,
-			       struct ib_udata *);
+int ocrdma_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
+		     struct ib_udata *udata);
 int _ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
 		      int attr_mask);
 int ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
@@ -233,6 +233,7 @@ static const struct ib_device_ops qedr_dev_ops = {
 	INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah),
 	INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq),
 	INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_qp, qedr_qp, ibqp),
 	INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq),
 	INIT_RDMA_OBJ_SIZE(ib_xrcd, qedr_xrcd, ibxrcd),
 	INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext),

@@ -319,20 +319,19 @@ err1:
 	return rc;
 }
 
-struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
-				 struct ib_qp_init_attr *attrs,
-				 struct qedr_qp *qp)
+int qedr_create_gsi_qp(struct qedr_dev *dev, struct ib_qp_init_attr *attrs,
+		       struct qedr_qp *qp)
 {
 	int rc;
 
 	rc = qedr_check_gsi_qp_attrs(dev, attrs);
 	if (rc)
-		return ERR_PTR(rc);
+		return rc;
 
 	rc = qedr_ll2_start(dev, attrs, qp);
 	if (rc) {
 		DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
-		return ERR_PTR(rc);
+		return rc;
 	}
 
 	/* create QP */
@@ -359,7 +358,7 @@ struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
 
 	DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp);
 
-	return &qp->ibqp;
+	return 0;
 
err:
 	kfree(qp->rqe_wr_id);
@@ -368,7 +367,7 @@ err:
 	if (rc)
 		DP_ERR(dev, "create gsi qp: failed destroy on create\n");
 
-	return ERR_PTR(-ENOMEM);
+	return -ENOMEM;
 }
 
 int qedr_destroy_gsi_qp(struct qedr_dev *dev)
|
@ -50,9 +50,8 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
|
||||
const struct ib_recv_wr **bad_wr);
|
||||
int qedr_gsi_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
|
||||
const struct ib_send_wr **bad_wr);
|
||||
struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
|
||||
struct ib_qp_init_attr *attrs,
|
||||
struct qedr_qp *qp);
|
||||
int qedr_create_gsi_qp(struct qedr_dev *dev, struct ib_qp_init_attr *attrs,
|
||||
struct qedr_qp *qp);
|
||||
void qedr_store_gsi_qp_cq(struct qedr_dev *dev,
|
||||
struct qedr_qp *qp, struct ib_qp_init_attr *attrs);
|
||||
int qedr_destroy_gsi_qp(struct qedr_dev *dev);
|
||||
|
@ -2239,34 +2239,30 @@ static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
|
||||
struct ib_qp_init_attr *attrs,
|
||||
struct ib_udata *udata)
|
||||
int qedr_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
|
||||
struct ib_udata *udata)
|
||||
{
|
||||
struct qedr_xrcd *xrcd = NULL;
|
||||
struct qedr_pd *pd = NULL;
|
||||
struct qedr_dev *dev;
|
||||
struct qedr_qp *qp;
|
||||
struct ib_qp *ibqp;
|
||||
struct ib_pd *ibpd = ibqp->pd;
|
||||
struct qedr_pd *pd = get_qedr_pd(ibpd);
|
||||
struct qedr_dev *dev = get_qedr_dev(ibqp->device);
|
||||
struct qedr_qp *qp = get_qedr_qp(ibqp);
|
||||
int rc = 0;
|
||||
|
||||
if (attrs->create_flags)
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (attrs->qp_type == IB_QPT_XRC_TGT) {
|
||||
if (attrs->qp_type == IB_QPT_XRC_TGT)
|
||||
xrcd = get_qedr_xrcd(attrs->xrcd);
|
||||
dev = get_qedr_dev(xrcd->ibxrcd.device);
|
||||
} else {
|
||||
else
|
||||
pd = get_qedr_pd(ibpd);
|
||||
dev = get_qedr_dev(ibpd->device);
|
||||
}
|
||||
|
||||
DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
|
||||
udata ? "user library" : "kernel", pd);
|
||||
|
||||
rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata);
|
||||
if (rc)
|
||||
return ERR_PTR(rc);
|
||||
return rc;
|
||||
|
||||
DP_DEBUG(dev, QEDR_MSG_QP,
|
||||
"create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
|
||||
@ -2276,20 +2272,10 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
|
||||
get_qedr_cq(attrs->recv_cq),
|
||||
attrs->recv_cq ? get_qedr_cq(attrs->recv_cq)->icid : 0);
|
||||
|
||||
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
|
||||
if (!qp) {
|
||||
DP_ERR(dev, "create qp: failed allocating memory\n");
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
qedr_set_common_qp_params(dev, qp, pd, attrs);
|
||||
|
||||
if (attrs->qp_type == IB_QPT_GSI) {
|
||||
ibqp = qedr_create_gsi_qp(dev, attrs, qp);
|
||||
if (IS_ERR(ibqp))
|
||||
kfree(qp);
|
||||
return ibqp;
|
||||
}
|
||||
if (attrs->qp_type == IB_QPT_GSI)
|
||||
return qedr_create_gsi_qp(dev, attrs, qp);
|
||||
|
||||
if (udata || xrcd)
|
||||
rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
|
||||
@ -2297,7 +2283,7 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
|
||||
rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
|
||||
|
||||
if (rc)
|
||||
goto out_free_qp;
|
||||
return rc;
|
||||
|
||||
qp->ibqp.qp_num = qp->qp_id;
|
||||
|
||||
@ -2307,14 +2293,11 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
|
||||
goto out_free_qp_resources;
|
||||
}
|
||||
|
||||
return &qp->ibqp;
|
||||
return 0;
|
||||
|
||||
out_free_qp_resources:
|
||||
qedr_free_qp_resources(dev, qp, udata);
|
||||
out_free_qp:
|
||||
kfree(qp);
|
||||
|
||||
return ERR_PTR(-EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
|
||||
@ -2874,8 +2857,6 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
|
||||
|
||||
if (rdma_protocol_iwarp(&dev->ibdev, 1))
|
||||
qedr_iw_qp_rem_ref(&qp->ibqp);
|
||||
else
|
||||
kfree(qp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -56,8 +56,8 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
|
||||
int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
|
||||
int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
|
||||
int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
|
||||
struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs,
|
||||
struct ib_udata *);
|
||||
int qedr_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
|
||||
struct ib_udata *udata);
|
||||
int qedr_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
|
||||
int attr_mask, struct ib_udata *udata);
|
||||
int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr,
|
||||
|
@ -360,6 +360,7 @@ static const struct ib_device_ops usnic_dev_ops = {
|
||||
.reg_user_mr = usnic_ib_reg_mr,
|
||||
INIT_RDMA_OBJ_SIZE(ib_pd, usnic_ib_pd, ibpd),
|
||||
INIT_RDMA_OBJ_SIZE(ib_cq, usnic_ib_cq, ibcq),
|
||||
INIT_RDMA_OBJ_SIZE(ib_qp, usnic_ib_qp_grp, ibqp),
|
||||
INIT_RDMA_OBJ_SIZE(ib_ucontext, usnic_ib_ucontext, ibucontext),
|
||||
};
|
||||
|
||||
|
@ -665,13 +665,12 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct usnic_ib_qp_grp *
|
||||
usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
|
||||
struct usnic_ib_pd *pd,
|
||||
struct usnic_vnic_res_spec *res_spec,
|
||||
struct usnic_transport_spec *transport_spec)
|
||||
int usnic_ib_qp_grp_create(struct usnic_ib_qp_grp *qp_grp,
|
||||
struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
|
||||
struct usnic_ib_pd *pd,
|
||||
struct usnic_vnic_res_spec *res_spec,
|
||||
struct usnic_transport_spec *transport_spec)
|
||||
{
|
||||
struct usnic_ib_qp_grp *qp_grp;
|
||||
int err;
|
||||
enum usnic_transport_type transport = transport_spec->trans_type;
|
||||
struct usnic_ib_qp_grp_flow *qp_flow;
|
||||
@ -684,20 +683,15 @@ usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
|
||||
usnic_err("Spec does not meet minimum req for transport %d\n",
|
||||
transport);
|
||||
log_spec(res_spec);
|
||||
return ERR_PTR(err);
|
||||
return err;
|
||||
}
|
||||
|
||||
qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
|
||||
if (!qp_grp)
|
||||
return NULL;
|
||||
|
||||
qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
|
||||
qp_grp);
|
||||
if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
|
||||
err = qp_grp->res_chunk_list ?
|
||||
PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
|
||||
goto out_free_qp_grp;
|
||||
}
|
||||
if (IS_ERR_OR_NULL(qp_grp->res_chunk_list))
|
||||
return qp_grp->res_chunk_list ?
|
||||
PTR_ERR(qp_grp->res_chunk_list) :
|
||||
-ENOMEM;
|
||||
|
||||
err = qp_grp_and_vf_bind(vf, pd, qp_grp);
|
||||
if (err)
|
||||
@ -724,7 +718,7 @@ usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
|
||||
|
||||
usnic_ib_sysfs_qpn_add(qp_grp);
|
||||
|
||||
return qp_grp;
|
||||
return 0;
|
||||
|
||||
out_release_flow:
|
||||
release_and_remove_flow(qp_flow);
|
||||
@ -732,10 +726,7 @@ out_qp_grp_vf_unbind:
|
||||
qp_grp_and_vf_unbind(qp_grp);
|
||||
out_free_res:
|
||||
free_qp_grp_res(qp_grp->res_chunk_list);
|
||||
out_free_qp_grp:
|
||||
kfree(qp_grp);
|
||||
|
||||
return ERR_PTR(err);
|
||||
return err;
|
||||
}
|
||||
|
||||
void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
|
||||
@ -748,7 +739,6 @@ void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
|
||||
usnic_ib_sysfs_qpn_remove(qp_grp);
|
||||
qp_grp_and_vf_unbind(qp_grp);
|
||||
free_qp_grp_res(qp_grp->res_chunk_list);
|
||||
kfree(qp_grp);
|
||||
}
|
||||
|
||||
struct usnic_vnic_res_chunk*
|
||||
|
@ -89,11 +89,11 @@ extern const struct usnic_vnic_res_spec min_transport_spec[USNIC_TRANSPORT_MAX];
|
||||
const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state);
|
||||
int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz);
|
||||
int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz);
|
||||
struct usnic_ib_qp_grp *
|
||||
usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
|
||||
struct usnic_ib_pd *pd,
|
||||
struct usnic_vnic_res_spec *res_spec,
|
||||
struct usnic_transport_spec *trans_spec);
|
||||
int usnic_ib_qp_grp_create(struct usnic_ib_qp_grp *qp,
|
||||
struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
|
||||
struct usnic_ib_pd *pd,
|
||||
struct usnic_vnic_res_spec *res_spec,
|
||||
struct usnic_transport_spec *trans_spec);
|
||||
void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp);
|
||||
int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
|
||||
enum ib_qp_state new_state,
|
||||
|
@ -168,30 +168,31 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct usnic_ib_qp_grp*
|
||||
find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
|
||||
struct usnic_ib_pd *pd,
|
||||
struct usnic_transport_spec *trans_spec,
|
||||
struct usnic_vnic_res_spec *res_spec)
|
||||
static int
|
||||
find_free_vf_and_create_qp_grp(struct ib_qp *qp,
|
||||
struct usnic_transport_spec *trans_spec,
|
||||
struct usnic_vnic_res_spec *res_spec)
|
||||
{
|
||||
struct usnic_ib_dev *us_ibdev = to_usdev(qp->device);
|
||||
struct usnic_ib_pd *pd = to_upd(qp->pd);
|
||||
struct usnic_ib_vf *vf;
|
||||
struct usnic_vnic *vnic;
|
||||
struct usnic_ib_qp_grp *qp_grp;
|
||||
struct usnic_ib_qp_grp *qp_grp = to_uqp_grp(qp);
|
||||
struct device *dev, **dev_list;
|
||||
int i;
|
||||
int i, ret;
|
||||
|
||||
BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));
|
||||
|
||||
if (list_empty(&us_ibdev->vf_dev_list)) {
|
||||
usnic_info("No vfs to allocate\n");
|
||||
return NULL;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (usnic_ib_share_vf) {
|
||||
/* Try to find resouces on a used vf which is in pd */
|
||||
dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
|
||||
if (IS_ERR(dev_list))
|
||||
return ERR_CAST(dev_list);
|
||||
return PTR_ERR(dev_list);
|
||||
for (i = 0; dev_list[i]; i++) {
|
||||
dev = dev_list[i];
|
||||
vf = dev_get_drvdata(dev);
|
||||
@ -202,10 +203,10 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
|
||||
dev_name(&us_ibdev->ib_dev.dev),
|
||||
pci_name(usnic_vnic_get_pdev(
|
||||
vnic)));
|
||||
qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev,
|
||||
vf, pd,
|
||||
res_spec,
|
||||
trans_spec);
|
||||
ret = usnic_ib_qp_grp_create(qp_grp,
|
||||
us_ibdev->ufdev,
|
||||
vf, pd, res_spec,
|
||||
trans_spec);
|
||||
|
||||
spin_unlock(&vf->lock);
|
||||
goto qp_grp_check;
|
||||
@ -223,9 +224,9 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
|
||||
vnic = vf->vnic;
|
||||
if (vf->qp_grp_ref_cnt == 0 &&
|
||||
usnic_vnic_check_room(vnic, res_spec) == 0) {
|
||||
qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf,
|
||||
pd, res_spec,
|
||||
trans_spec);
|
||||
ret = usnic_ib_qp_grp_create(qp_grp, us_ibdev->ufdev,
|
||||
vf, pd, res_spec,
|
||||
trans_spec);
|
||||
|
||||
spin_unlock(&vf->lock);
|
||||
goto qp_grp_check;
|
||||
@ -235,16 +236,15 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
|
||||
|
||||
usnic_info("No free qp grp found on %s\n",
|
||||
dev_name(&us_ibdev->ib_dev.dev));
|
||||
return ERR_PTR(-ENOMEM);
|
||||
return -ENOMEM;
|
||||
|
||||
qp_grp_check:
|
||||
if (IS_ERR_OR_NULL(qp_grp)) {
|
||||
if (ret) {
|
||||
usnic_err("Failed to allocate qp_grp\n");
|
||||
if (usnic_ib_share_vf)
|
||||
usnic_uiom_free_dev_list(dev_list);
return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
}
return qp_grp;
return ret;
}

static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)

@ -458,13 +458,12 @@ int usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
return 0;
}

struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
int usnic_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
{
int err;
struct usnic_ib_dev *us_ibdev;
struct usnic_ib_qp_grp *qp_grp;
struct usnic_ib_qp_grp *qp_grp = to_uqp_grp(ibqp);
struct usnic_ib_ucontext *ucontext = rdma_udata_to_drv_context(
udata, struct usnic_ib_ucontext, ibucontext);
int cq_cnt;
@ -474,29 +473,29 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,

usnic_dbg("\n");

us_ibdev = to_usdev(pd->device);
us_ibdev = to_usdev(ibqp->device);

if (init_attr->create_flags)
return ERR_PTR(-EOPNOTSUPP);
return -EOPNOTSUPP;

err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
if (err) {
usnic_err("%s: cannot copy udata for create_qp\n",
dev_name(&us_ibdev->ib_dev.dev));
return ERR_PTR(-EINVAL);
return -EINVAL;
}

err = create_qp_validate_user_data(cmd);
if (err) {
usnic_err("%s: Failed to validate user data\n",
dev_name(&us_ibdev->ib_dev.dev));
return ERR_PTR(-EINVAL);
return -EINVAL;
}

if (init_attr->qp_type != IB_QPT_UD) {
usnic_err("%s asked to make a non-UD QP: %d\n",
dev_name(&us_ibdev->ib_dev.dev), init_attr->qp_type);
return ERR_PTR(-EOPNOTSUPP);
return -EOPNOTSUPP;
}

trans_spec = cmd.spec;
@ -504,13 +503,9 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2;
res_spec = min_transport_spec[trans_spec.trans_type];
usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt);
qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd),
&trans_spec,
&res_spec);
if (IS_ERR_OR_NULL(qp_grp)) {
err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM;
err = find_free_vf_and_create_qp_grp(ibqp, &trans_spec, &res_spec);
if (err)
goto out_release_mutex;
}

err = usnic_ib_fill_create_qp_resp(qp_grp, udata);
if (err) {
@ -522,13 +517,13 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
list_add_tail(&qp_grp->link, &ucontext->qp_grp_list);
usnic_ib_log_vf(qp_grp->vf);
mutex_unlock(&us_ibdev->usdev_lock);
return &qp_grp->ibqp;
return 0;

out_release_qp_grp:
qp_grp_destroy(qp_grp);
out_release_mutex:
mutex_unlock(&us_ibdev->usdev_lock);
return ERR_PTR(err);
return err;
}

int usnic_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)

@ -50,9 +50,8 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid);
int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
int usnic_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
int usnic_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
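The usnic hunks above are the template for every driver conversion in this commit: create_qp no longer allocates its own QP and returns a struct ib_qp pointer; the core allocates the driver's container up front, passes it in, and the driver only initializes it and returns an errno. A minimal sketch of the converted contract follows; foo_qp and foo_hw_alloc_qp are hypothetical names for illustration, not code from this commit:

/* Hypothetical driver object: the embedded ib_qp must be the first member
 * so the core's allocation and the driver struct alias each other.
 */
struct foo_qp {
	struct ib_qp ibqp;
	/* driver-private state follows */
};

static int foo_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
			 struct ib_udata *udata)
{
	/* Memory for struct foo_qp was already zero-allocated by the core */
	struct foo_qp *qp = container_of(ibqp, struct foo_qp, ibqp);
	int err;

	err = foo_hw_alloc_qp(qp, attr, udata); /* hypothetical HW setup */
	if (err)
		return err; /* the core releases the container on failure */
	return 0;
}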
@ -185,6 +185,7 @@ static const struct ib_device_ops pvrdma_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, pvrdma_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, pvrdma_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, pvrdma_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_qp, pvrdma_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_ucontext, pvrdma_ucontext, ibucontext),
};

@ -182,18 +182,17 @@ static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap,

/**
* pvrdma_create_qp - create queue pair
* @pd: protection domain
* @ibqp: queue pair
* @init_attr: queue pair attributes
* @udata: user data
*
* @return: the ib_qp pointer on success, otherwise returns an errno.
* @return: 0 on success, otherwise returns an errno.
*/
struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
int pvrdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
{
struct pvrdma_qp *qp = NULL;
struct pvrdma_dev *dev = to_vdev(pd->device);
struct pvrdma_qp *qp = to_vqp(ibqp);
struct pvrdma_dev *dev = to_vdev(ibqp->device);
union pvrdma_cmd_req req;
union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_qp *cmd = &req.create_qp;
@ -209,7 +208,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
dev_warn(&dev->pdev->dev,
"invalid create queuepair flags %#x\n",
init_attr->create_flags);
return ERR_PTR(-EOPNOTSUPP);
return -EOPNOTSUPP;
}

if (init_attr->qp_type != IB_QPT_RC &&
@ -217,22 +216,22 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
init_attr->qp_type != IB_QPT_GSI) {
dev_warn(&dev->pdev->dev, "queuepair type %d not supported\n",
init_attr->qp_type);
return ERR_PTR(-EOPNOTSUPP);
return -EOPNOTSUPP;
}

if (is_srq && !dev->dsr->caps.max_srq) {
dev_warn(&dev->pdev->dev,
"SRQs not supported by device\n");
return ERR_PTR(-EINVAL);
return -EINVAL;
}

if (!atomic_add_unless(&dev->num_qps, 1, dev->dsr->caps.max_qp))
return ERR_PTR(-ENOMEM);
return -ENOMEM;

switch (init_attr->qp_type) {
case IB_QPT_GSI:
if (init_attr->port_num == 0 ||
init_attr->port_num > pd->device->phys_port_cnt) {
init_attr->port_num > ibqp->device->phys_port_cnt) {
dev_warn(&dev->pdev->dev, "invalid queuepair attrs\n");
ret = -EINVAL;
goto err_qp;
@ -240,12 +239,6 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
fallthrough;
case IB_QPT_RC:
case IB_QPT_UD:
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp) {
ret = -ENOMEM;
goto err_qp;
}

spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
mutex_init(&qp->mutex);
@ -275,9 +268,9 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,

if (!is_srq) {
/* set qp->sq.wqe_cnt, shift, buf_size.. */
qp->rumem =
ib_umem_get(pd->device, ucmd.rbuf_addr,
ucmd.rbuf_size, 0);
qp->rumem = ib_umem_get(ibqp->device,
ucmd.rbuf_addr,
ucmd.rbuf_size, 0);
if (IS_ERR(qp->rumem)) {
ret = PTR_ERR(qp->rumem);
goto err_qp;
@ -288,7 +281,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
qp->srq = to_vsrq(init_attr->srq);
}

qp->sumem = ib_umem_get(pd->device, ucmd.sbuf_addr,
qp->sumem = ib_umem_get(ibqp->device, ucmd.sbuf_addr,
ucmd.sbuf_size, 0);
if (IS_ERR(qp->sumem)) {
if (!is_srq)
@ -306,12 +299,12 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
qp->npages_recv = 0;
qp->npages = qp->npages_send + qp->npages_recv;
} else {
ret = pvrdma_set_sq_size(to_vdev(pd->device),
ret = pvrdma_set_sq_size(to_vdev(ibqp->device),
&init_attr->cap, qp);
if (ret)
goto err_qp;

ret = pvrdma_set_rq_size(to_vdev(pd->device),
ret = pvrdma_set_rq_size(to_vdev(ibqp->device),
&init_attr->cap, qp);
if (ret)
goto err_qp;
@ -362,7 +355,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,

memset(cmd, 0, sizeof(*cmd));
cmd->hdr.cmd = PVRDMA_CMD_CREATE_QP;
cmd->pd_handle = to_vpd(pd)->pd_handle;
cmd->pd_handle = to_vpd(ibqp->pd)->pd_handle;
cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle;
cmd->recv_cq_handle = to_vcq(init_attr->recv_cq)->cq_handle;
if (is_srq)
@ -418,11 +411,11 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
dev_warn(&dev->pdev->dev,
"failed to copy back udata\n");
__pvrdma_destroy_qp(dev, qp);
return ERR_PTR(-EINVAL);
return -EINVAL;
}
}

return &qp->ibqp;
return 0;

err_pdir:
pvrdma_page_dir_cleanup(dev, &qp->pdir);
@ -430,10 +423,8 @@ err_umem:
ib_umem_release(qp->rumem);
ib_umem_release(qp->sumem);
err_qp:
kfree(qp);
atomic_dec(&dev->num_qps);

return ERR_PTR(ret);
return ret;
}

static void _pvrdma_free_qp(struct pvrdma_qp *qp)
@ -454,8 +445,6 @@ static void _pvrdma_free_qp(struct pvrdma_qp *qp)

pvrdma_page_dir_cleanup(dev, &qp->pdir);

kfree(qp);

atomic_dec(&dev->num_qps);
}

@ -390,9 +390,8 @@ int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
int pvrdma_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);

struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
int pvrdma_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
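Alongside the signature change, pvrdma now advertises its container size with INIT_RDMA_OBJ_SIZE(ib_qp, pvrdma_qp, ibqp); that declaration is what tells the core how many bytes to allocate before invoking the op. Given the macros added to ib_verbs.h further down in this diff, the core-side allocation for this driver amounts to the following (a sketch of the macro expansion, not new code in the commit):

/* rdma_zalloc_drv_obj_numa(ib_dev, ib_qp) expands, per the ib_verbs.h hunk
 * below, to a zeroed allocation of the full driver container;
 * size_ib_qp was set to sizeof(struct pvrdma_qp) by INIT_RDMA_OBJ_SIZE.
 */
struct ib_qp *qp = (struct ib_qp *)rdma_zalloc_obj(ib_dev,
					ib_dev->ops.size_ib_qp,
					GFP_KERNEL, true);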
@ -1058,7 +1058,7 @@ static int alloc_ud_wq_attr(struct rvt_qp *qp, int node)

/**
* rvt_create_qp - create a queue pair for a device
* @ibpd: the protection domain whose device we create the queue pair for
* @ibqp: the queue pair
* @init_attr: the attributes of the queue pair
* @udata: user data for libibverbs.so
*
@ -1066,47 +1066,45 @@ static int alloc_ud_wq_attr(struct rvt_qp *qp, int node)
* unique idea of what queue pair numbers mean. For instance there is a reserved
* range for PSM.
*
* Return: the queue pair on success, otherwise returns an errno.
* Return: 0 on success, otherwise returns an errno.
*
* Called by the ib_create_qp() core verbs function.
*/
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
{
struct rvt_qp *qp;
int err;
struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
int ret = -ENOMEM;
struct rvt_swqe *swq = NULL;
size_t sz;
size_t sg_list_sz = 0;
struct ib_qp *ret = ERR_PTR(-ENOMEM);
struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
void *priv = NULL;
size_t sqsize;
u8 exclude_prefix = 0;

if (!rdi)
return ERR_PTR(-EINVAL);
return -EINVAL;

if (init_attr->create_flags & ~IB_QP_CREATE_NETDEV_USE)
return ERR_PTR(-EOPNOTSUPP);
return -EOPNOTSUPP;

if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr)
return ERR_PTR(-EINVAL);
return -EINVAL;

/* Check receive queue parameters if no SRQ is specified. */
if (!init_attr->srq) {
if (init_attr->cap.max_recv_sge >
rdi->dparms.props.max_recv_sge ||
init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
return ERR_PTR(-EINVAL);
return -EINVAL;

if (init_attr->cap.max_send_sge +
init_attr->cap.max_send_wr +
init_attr->cap.max_recv_sge +
init_attr->cap.max_recv_wr == 0)
return ERR_PTR(-EINVAL);
return -EINVAL;
}
sqsize =
init_attr->cap.max_send_wr + 1 +
@ -1115,8 +1113,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
case IB_QPT_SMI:
case IB_QPT_GSI:
if (init_attr->port_num == 0 ||
init_attr->port_num > ibpd->device->phys_port_cnt)
return ERR_PTR(-EINVAL);
init_attr->port_num > ibqp->device->phys_port_cnt)
return -EINVAL;
fallthrough;
case IB_QPT_UC:
case IB_QPT_RC:
@ -1124,7 +1122,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
sz = struct_size(swq, sg_list, init_attr->cap.max_send_sge);
swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
if (!swq)
return ERR_PTR(-ENOMEM);
return -ENOMEM;

if (init_attr->srq) {
struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);
@ -1135,9 +1133,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
} else if (init_attr->cap.max_recv_sge > 1)
sg_list_sz = sizeof(*qp->r_sg_list) *
(init_attr->cap.max_recv_sge - 1);
qp = kzalloc_node(sizeof(*qp), GFP_KERNEL, rdi->dparms.node);
if (!qp)
goto bail_swq;
qp->r_sg_list =
kzalloc_node(sg_list_sz, GFP_KERNEL, rdi->dparms.node);
if (!qp->r_sg_list)
@ -1166,7 +1161,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
*/
priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
if (IS_ERR(priv)) {
ret = priv;
ret = PTR_ERR(priv);
goto bail_qp;
}
qp->priv = priv;
@ -1180,12 +1175,10 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
sizeof(struct rvt_rwqe);
err = rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz,
ret = rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz,
rdi->dparms.node, udata);
if (err) {
ret = ERR_PTR(err);
if (ret)
goto bail_driver_priv;
}
}

/*
@ -1206,40 +1199,35 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
qp->s_max_sge = init_attr->cap.max_send_sge;
if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
qp->s_flags = RVT_S_SIGNAL_REQ_WR;
err = alloc_ud_wq_attr(qp, rdi->dparms.node);
if (err) {
ret = (ERR_PTR(err));
ret = alloc_ud_wq_attr(qp, rdi->dparms.node);
if (ret)
goto bail_rq_rvt;
}

if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
exclude_prefix = RVT_AIP_QP_PREFIX;

err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
ret = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
init_attr->qp_type,
init_attr->port_num,
exclude_prefix);
if (err < 0) {
ret = ERR_PTR(err);
if (ret < 0)
goto bail_rq_wq;
}
qp->ibqp.qp_num = err;

qp->ibqp.qp_num = ret;
if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
qp->ibqp.qp_num |= RVT_AIP_QP_BASE;
qp->port_num = init_attr->port_num;
rvt_init_qp(rdi, qp, init_attr->qp_type);
if (rdi->driver_f.qp_priv_init) {
err = rdi->driver_f.qp_priv_init(rdi, qp, init_attr);
if (err) {
ret = ERR_PTR(err);
ret = rdi->driver_f.qp_priv_init(rdi, qp, init_attr);
if (ret)
goto bail_rq_wq;
}
}
break;

default:
/* Don't support raw QPs */
return ERR_PTR(-EOPNOTSUPP);
return -EOPNOTSUPP;
}

init_attr->cap.max_inline_data = 0;
@ -1252,28 +1240,24 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
if (!qp->r_rq.wq) {
__u64 offset = 0;

err = ib_copy_to_udata(udata, &offset,
ret = ib_copy_to_udata(udata, &offset,
sizeof(offset));
if (err) {
ret = ERR_PTR(err);
if (ret)
goto bail_qpn;
}
} else {
u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

qp->ip = rvt_create_mmap_info(rdi, s, udata,
qp->r_rq.wq);
if (IS_ERR(qp->ip)) {
ret = ERR_CAST(qp->ip);
ret = PTR_ERR(qp->ip);
goto bail_qpn;
}

err = ib_copy_to_udata(udata, &qp->ip->offset,
ret = ib_copy_to_udata(udata, &qp->ip->offset,
sizeof(qp->ip->offset));
if (err) {
ret = ERR_PTR(err);
if (ret)
goto bail_ip;
}
}
qp->pid = current->pid;
}
@ -1281,7 +1265,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
spin_lock(&rdi->n_qps_lock);
if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
spin_unlock(&rdi->n_qps_lock);
ret = ERR_PTR(-ENOMEM);
ret = -ENOMEM;
goto bail_ip;
}

@ -1307,9 +1291,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
spin_unlock_irq(&rdi->pending_lock);
}

ret = &qp->ibqp;

return ret;
return 0;

bail_ip:
if (qp->ip)
@ -1330,11 +1312,7 @@ bail_driver_priv:
bail_qp:
kfree(qp->s_ack_queue);
kfree(qp->r_sg_list);
kfree(qp);

bail_swq:
vfree(swq);

return ret;
}

@ -1769,7 +1747,6 @@ int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
rdma_destroy_ah_attr(&qp->alt_ah_attr);
free_ud_wq_attr(qp);
vfree(qp->s_wq);
kfree(qp);
return 0;
}

@ -52,9 +52,8 @@
int rvt_driver_qp_init(struct rvt_dev_info *rdi);
void rvt_qp_exit(struct rvt_dev_info *rdi);
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
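The teardown side mirrors the allocation change: rvt_destroy_qp above loses its kfree(qp), just as _pvrdma_free_qp did, because the core now owns the QP memory and releases it after the driver's destroy op returns. A sketch of the resulting split, again with the hypothetical foo driver names rather than code from the commit:

static int foo_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct foo_qp *qp = container_of(ibqp, struct foo_qp, ibqp);

	foo_hw_release_qp(qp);	/* hypothetical: tear down HW/driver state only */
	return 0;		/* no kfree(qp): the core frees the container */
}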
@ -131,6 +131,13 @@ static int rvt_query_device(struct ib_device *ibdev,
return 0;
}

static int rvt_get_numa_node(struct ib_device *ibdev)
{
struct rvt_dev_info *rdi = ib_to_rvt(ibdev);

return rdi->dparms.node;
}

static int rvt_modify_device(struct ib_device *device,
int device_modify_mask,
struct ib_device_modify *device_modify)
@ -380,6 +387,7 @@ static const struct ib_device_ops rvt_dev_ops = {
.destroy_srq = rvt_destroy_srq,
.detach_mcast = rvt_detach_mcast,
.get_dma_mr = rvt_get_dma_mr,
.get_numa_node = rvt_get_numa_node,
.get_port_immutable = rvt_get_port_immutable,
.map_mr_sg = rvt_map_mr_sg,
.mmap = rvt_mmap,
@ -406,6 +414,7 @@ static const struct ib_device_ops rvt_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, rvt_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, rvt_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, rvt_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_qp, rvt_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_srq, rvt_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, rvt_ucontext, ibucontext),
};
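rdmavt is the driver that wires up the new .get_numa_node op, steering the core's QP allocation onto the device's home node (rdi->dparms.node). Combined with the rdma_zalloc_obj() helper added at the end of this diff, the effect is roughly the following (illustrative, assuming a device whose dparms.node is 1):

/* rdmavt device on NUMA node 1: the core's QP allocation becomes */
qp = kzalloc_node(dev->ops.size_ib_qp, GFP_KERNEL, 1);

/* Any other device (no get_numa_node op): allocation stays */
qp = kzalloc(dev->ops.size_ib_qp, GFP_KERNEL);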
@ -41,7 +41,7 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
.size = sizeof(struct rxe_qp),
.elem_offset = offsetof(struct rxe_qp, pelem),
.cleanup = rxe_qp_cleanup,
.flags = RXE_POOL_INDEX,
.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_QP_INDEX,
.max_index = RXE_MAX_QP_INDEX,
},

@ -391,59 +391,52 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
return err;
}

static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
struct ib_qp_init_attr *init,
struct ib_udata *udata)
static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
struct ib_udata *udata)
{
int err;
struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd);
struct rxe_qp *qp;
struct rxe_dev *rxe = to_rdev(ibqp->device);
struct rxe_pd *pd = to_rpd(ibqp->pd);
struct rxe_qp *qp = to_rqp(ibqp);
struct rxe_create_qp_resp __user *uresp = NULL;

if (udata) {
if (udata->outlen < sizeof(*uresp))
return ERR_PTR(-EINVAL);
return -EINVAL;
uresp = udata->outbuf;
}

if (init->create_flags)
return ERR_PTR(-EOPNOTSUPP);
return -EOPNOTSUPP;

err = rxe_qp_chk_init(rxe, init);
if (err)
goto err1;

qp = rxe_alloc(&rxe->qp_pool);
if (!qp) {
err = -ENOMEM;
goto err1;
}
return err;

if (udata) {
if (udata->inlen) {
err = -EINVAL;
goto err2;
}
if (udata->inlen)
return -EINVAL;

qp->is_user = true;
} else {
qp->is_user = false;
}

rxe_add_index(qp);

err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd, udata);
err = rxe_add_to_pool(&rxe->qp_pool, qp);
if (err)
goto err3;
return err;

return &qp->ibqp;
rxe_add_index(qp);
err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata);
if (err)
goto qp_init;

err3:
return 0;

qp_init:
rxe_drop_index(qp);
err2:
rxe_drop_ref(qp);
err1:
return ERR_PTR(err);
return err;
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
@ -1145,6 +1138,7 @@ static const struct ib_device_ops rxe_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, rxe_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_qp, rxe_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),

@ -210,8 +210,8 @@ struct rxe_resp_info {
};

struct rxe_qp {
struct rxe_pool_entry pelem;
struct ib_qp ibqp;
struct rxe_pool_entry pelem;
struct ib_qp_attr attr;
unsigned int valid;
unsigned int mtu;
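The member reordering inside struct rxe_qp is load-bearing, not cosmetic: the core now allocates size_ib_qp bytes and uses the result directly as a struct ib_qp *, so the embedded ibqp has to sit at offset 0 of the driver container, which is why it moves ahead of pelem. INIT_RDMA_OBJ_SIZE enforces this with compile-time checks (the type-check half is visible at the top of the ib_verbs.h hunk below). Illustratively:

/* Sketch: inside any function, this invariant holds after the change. */
BUILD_BUG_ON(offsetof(struct rxe_qp, ibqp) != 0);

/* With ibqp first, the driver container and the core's pointer alias: */
struct rxe_qp *qp = to_rqp(ibqp); /* container_of(ibqp, struct rxe_qp, ibqp) */
/* (void *)qp == (void *)ibqp, matching the cast the allocation macros do. */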
@ -297,6 +297,7 @@ static const struct ib_device_ops siw_device_ops = {

INIT_RDMA_OBJ_SIZE(ib_cq, siw_cq, base_cq),
INIT_RDMA_OBJ_SIZE(ib_pd, siw_pd, base_pd),
INIT_RDMA_OBJ_SIZE(ib_qp, siw_qp, base_qp),
INIT_RDMA_OBJ_SIZE(ib_srq, siw_srq, base_srq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, siw_ucontext, base_ucontext),
};

@ -1344,6 +1344,4 @@ void siw_free_qp(struct kref *ref)
siw_put_tx_cpu(qp->tx_cpu);

atomic_dec(&sdev->num_qp);
siw_dbg_qp(qp, "free QP\n");
kfree_rcu(qp, rcu);
}

@ -285,16 +285,16 @@ siw_mmap_entry_insert(struct siw_ucontext *uctx,
*
* Create QP of requested size on given device.
*
* @pd: Protection Domain
* @qp: Queue pair
* @attrs: Initial QP attributes.
* @udata: used to provide QP ID, SQ and RQ size back to user.
*/

struct ib_qp *siw_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *attrs,
struct ib_udata *udata)
int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
struct ib_udata *udata)
{
struct siw_qp *qp = NULL;
struct ib_pd *pd = ibqp->pd;
struct siw_qp *qp = to_siw_qp(ibqp);
struct ib_device *base_dev = pd->device;
struct siw_device *sdev = to_siw_dev(base_dev);
struct siw_ucontext *uctx =
@ -307,17 +306,16 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
siw_dbg(base_dev, "create new QP\n");

if (attrs->create_flags)
return ERR_PTR(-EOPNOTSUPP);
return -EOPNOTSUPP;

if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
siw_dbg(base_dev, "too many QP's\n");
rv = -ENOMEM;
goto err_out;
return -ENOMEM;
}
if (attrs->qp_type != IB_QPT_RC) {
siw_dbg(base_dev, "only RC QP's supported\n");
rv = -EOPNOTSUPP;
goto err_out;
goto err_atomic;
}
if ((attrs->cap.max_send_wr > SIW_MAX_QP_WR) ||
(attrs->cap.max_recv_wr > SIW_MAX_QP_WR) ||
@ -325,13 +324,13 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
(attrs->cap.max_recv_sge > SIW_MAX_SGE)) {
siw_dbg(base_dev, "QP size error\n");
rv = -EINVAL;
goto err_out;
goto err_atomic;
}
if (attrs->cap.max_inline_data > SIW_MAX_INLINE) {
siw_dbg(base_dev, "max inline send: %d > %d\n",
attrs->cap.max_inline_data, (int)SIW_MAX_INLINE);
rv = -EINVAL;
goto err_out;
goto err_atomic;
}
/*
* NOTE: we allow for zero element SQ and RQ WQE's SGL's
@ -340,19 +339,15 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
if (attrs->cap.max_send_wr + attrs->cap.max_recv_wr == 0) {
siw_dbg(base_dev, "QP must have send or receive queue\n");
rv = -EINVAL;
goto err_out;
goto err_atomic;
}

if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {
siw_dbg(base_dev, "send CQ or receive CQ invalid\n");
rv = -EINVAL;
goto err_out;
}
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp) {
rv = -ENOMEM;
goto err_out;
goto err_atomic;
}

init_rwsem(&qp->state_lock);
spin_lock_init(&qp->sq_lock);
spin_lock_init(&qp->rq_lock);
@ -360,7 +355,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,

rv = siw_qp_add(sdev, qp);
if (rv)
goto err_out;
goto err_atomic;

num_sqe = attrs->cap.max_send_wr;
num_rqe = attrs->cap.max_recv_wr;
@ -482,23 +477,20 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
list_add_tail(&qp->devq, &sdev->qp_list);
spin_unlock_irqrestore(&sdev->lock, flags);

return &qp->base_qp;
return 0;

err_out_xa:
xa_erase(&sdev->qp_xa, qp_id(qp));
err_out:
if (qp) {
if (uctx) {
rdma_user_mmap_entry_remove(qp->sq_entry);
rdma_user_mmap_entry_remove(qp->rq_entry);
}
vfree(qp->sendq);
vfree(qp->recvq);
kfree(qp);
if (uctx) {
rdma_user_mmap_entry_remove(qp->sq_entry);
rdma_user_mmap_entry_remove(qp->rq_entry);
}
atomic_dec(&sdev->num_qp);
vfree(qp->sendq);
vfree(qp->recvq);

return ERR_PTR(rv);
err_atomic:
atomic_dec(&sdev->num_qp);
return rv;
}

/*

@ -50,9 +50,8 @@ int siw_query_gid(struct ib_device *base_dev, u32 port, int idx,
union ib_gid *gid);
int siw_alloc_pd(struct ib_pd *base_pd, struct ib_udata *udata);
int siw_dealloc_pd(struct ib_pd *base_pd, struct ib_udata *udata);
struct ib_qp *siw_create_qp(struct ib_pd *base_pd,
struct ib_qp_init_attr *attr,
struct ib_udata *udata);
int siw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attr,
struct ib_udata *udata);
int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int siw_verbs_modify_qp(struct ib_qp *base_qp, struct ib_qp_attr *attr,
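siw's reworked error paths show the new division of labor on failure: the driver still unwinds its own resources, the num_qp counter, the send/receive queue arrays and the mmap entries, but it never frees the QP struct itself, which the core releases. In sketch form (setup_queues is a hypothetical stand-in for the real SQ/RQ setup, not a siw function):

	rv = siw_qp_add(sdev, qp);	/* first failure point after the counter bump */
	if (rv)
		goto err_atomic;
	rv = setup_queues(qp);		/* hypothetical */
	if (rv)
		goto err_atomic;
	return 0;

err_atomic:
	atomic_dec(&sdev->num_qp);	/* the QP struct itself is freed by the core */
	return rv;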
@ -2268,8 +2268,13 @@ struct iw_cm_conn_param;
!__same_type(((struct drv_struct *)NULL)->member, \
struct ib_struct)))

#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \
((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))
#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \
((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
gfp, false))

#define rdma_zalloc_drv_obj_numa(ib_dev, ib_type) \
((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
GFP_KERNEL, true))

#define rdma_zalloc_drv_obj(ib_dev, ib_type) \
rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
@ -2435,9 +2440,8 @@ struct ib_device_ops {
struct ib_udata *udata);
int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
struct ib_qp *(*create_qp)(struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata);
int (*create_qp)(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata);
int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_udata *udata);
int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
@ -2635,11 +2639,18 @@ struct ib_device_ops {
int (*query_ucontext)(struct ib_ucontext *context,
struct uverbs_attr_bundle *attrs);

/*
* Provide NUMA node. This API exists for rdmavt/hfi1 only.
* Everyone else relies on Linux memory management model.
*/
int (*get_numa_node)(struct ib_device *dev);

DECLARE_RDMA_OBJ_SIZE(ib_ah);
DECLARE_RDMA_OBJ_SIZE(ib_counters);
DECLARE_RDMA_OBJ_SIZE(ib_cq);
DECLARE_RDMA_OBJ_SIZE(ib_mw);
DECLARE_RDMA_OBJ_SIZE(ib_pd);
DECLARE_RDMA_OBJ_SIZE(ib_qp);
DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table);
DECLARE_RDMA_OBJ_SIZE(ib_srq);
DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
@ -2746,6 +2757,15 @@ struct ib_device {
u32 lag_flags;
};

static inline void *rdma_zalloc_obj(struct ib_device *dev, size_t size,
gfp_t gfp, bool is_numa_aware)
{
if (is_numa_aware && dev->ops.get_numa_node)
return kzalloc_node(size, gfp, dev->ops.get_numa_node(dev));

return kzalloc(size, gfp);
}

struct ib_client_nl_info;
struct ib_client {
const char *name;
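Putting the header changes together: a converted driver's ops table now pairs an int-returning create_qp with a size declaration, and the change stays confined to the driver-facing ops, since kernel consumers of the verbs API still receive a struct ib_qp pointer from the core. A closing sketch with the same hypothetical foo driver used earlier:

static const struct ib_device_ops foo_dev_ops = {
	.create_qp = foo_create_qp,	/* int-returning, as sketched earlier */
	.destroy_qp = foo_destroy_qp,
	INIT_RDMA_OBJ_SIZE(ib_qp, foo_qp, ibqp), /* size plus offset/type checks */
};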