mirror of https://github.com/torvalds/linux.git
RDMA: Cleanup undesired pd->uobject usage
Drivers should be using udata to determine whether a method was invoked from user space or kernel space. A pd does not necessarily indicate whether another object is a kernel or a user object. Converting these tests to use udata eliminates a large number of uobject references from the drivers.

Signed-off-by: Shamir Rabinovitch <shamir.rabinovitch@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:
parent af8d70375d
commit e00b64f7c5
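The change applies one pattern across every driver below: a verbs method invoked from user space always carries a non-NULL struct ib_udata *, so testing udata is the reliable user-vs-kernel check, while pd->uobject only records how that particular pd was created. The following is a minimal sketch of the before/after shape; the function names are hypothetical and not taken from any driver in this diff (a second sketch after the diff covers the variant where udata has to be threaded down to internal helpers):

#include <rdma/ib_verbs.h>

/* Before: infers the caller from the pd, which asks the wrong object. */
static bool old_invoked_from_userspace(struct ib_pd *pd,
                                       struct ib_udata *udata)
{
    return pd->uobject != NULL; /* says who owns the pd, not who called us */
}

/* After: asks the invocation itself; udata is non-NULL only for user calls. */
static bool new_invoked_from_userspace(struct ib_pd *pd,
                                       struct ib_udata *udata)
{
    return udata != NULL;
}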
@@ -698,7 +698,7 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
     ah->qplib_ah.flow_label = grh->flow_label;
     ah->qplib_ah.hop_limit = grh->hop_limit;
     ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
-    if (ib_pd->uobject &&
+    if (udata &&
        !rdma_is_multicast_addr((struct in6_addr *)
                    grh->dgid.raw) &&
        !rdma_link_local_addr((struct in6_addr *)
@@ -729,7 +729,7 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
     }

     /* Write AVID to shared page. */
-    if (ib_pd->uobject) {
+    if (udata) {
         struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
         struct bnxt_re_ucontext *uctx;
         unsigned long flag;
@@ -836,7 +836,7 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
      * Kernel users need more wq space for fastreg WRs which can take
      * 2 WR fragments.
      */
-    ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
+    ucontext = udata ? to_iwch_ucontext(pd->uobject->context) : NULL;
     if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
         wqsize = roundup_pow_of_two(rqsize +
                 roundup_pow_of_two(attrs->cap.max_send_wr * 2));
@@ -2163,7 +2163,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
     if (sqsize < 8)
         sqsize = 8;

-    ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
+    ucontext = udata ? to_c4iw_ucontext(pd->uobject->context) : NULL;

     qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
     if (!qhp)
@@ -2712,7 +2712,7 @@ struct ib_srq *c4iw_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *attrs,
     rqsize = attrs->attr.max_wr + 1;
     rqsize = roundup_pow_of_two(max_t(u16, rqsize, 16));

-    ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
+    ucontext = udata ? to_c4iw_ucontext(pd->uobject->context) : NULL;

     srq = kzalloc(sizeof(*srq), GFP_KERNEL);
     if (!srq)
@@ -3926,7 +3926,7 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
     struct hns_roce_qp_work *qp_work;
     struct hns_roce_v1_priv *priv;
     struct hns_roce_cq *send_cq, *recv_cq;
-    int is_user = !!ibqp->pd->uobject;
+    bool is_user = ibqp->uobject;
     int is_timeout = 0;
     int ret;

@@ -4133,7 +4133,7 @@ out:

 static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
                      struct hns_roce_qp *hr_qp,
-                     int is_user)
+                     bool is_user)
 {
     struct hns_roce_cq *send_cq, *recv_cq;
     struct device *dev = hr_dev->dev;
@@ -4210,7 +4210,7 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp)
     struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
     int ret;

-    ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, !!ibqp->pd->uobject);
+    ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, ibqp->uobject);
     if (ret) {
         dev_err(hr_dev->dev, "Destroy qp failed(%d)\n", ret);
         return ret;
@@ -280,7 +280,7 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
 EXPORT_SYMBOL_GPL(hns_roce_release_range_qp);

 static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
-                struct ib_qp_cap *cap, int is_user, int has_rq,
+                struct ib_qp_cap *cap, bool is_user, int has_rq,
                 struct hns_roce_qp *hr_qp)
 {
     struct device *dev = hr_dev->dev;
@@ -560,7 +560,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
     else
         hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR);

-    ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
+    ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, udata,
                    hns_roce_qp_has_rq(init_attr), hr_qp);
     if (ret) {
         dev_err(dev, "hns_roce_set_rq_size failed\n");
@@ -598,7 +598,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
                     init_attr->cap.max_recv_sge];
     }

-    if (ib_pd->uobject) {
+    if (udata) {
         if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
             dev_err(dev, "ib_copy_from_udata error for create qp\n");
             ret = -EFAULT;
@@ -783,7 +783,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
     else
         hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);

-    if (ib_pd->uobject && (udata->outlen >= sizeof(resp)) &&
+    if (udata && (udata->outlen >= sizeof(resp)) &&
         (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) {

         /* indicate kernel supports rq record db */
@@ -810,7 +810,7 @@ err_qpn:
     hns_roce_release_range_qp(hr_dev, qpn, 1);

 err_wrid:
-    if (ib_pd->uobject) {
+    if (udata) {
         if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
             (udata->outlen >= sizeof(resp)) &&
             hns_roce_qp_has_rq(init_attr))
@@ -823,7 +823,7 @@ err_wrid:
     }

 err_sq_dbmap:
-    if (ib_pd->uobject)
+    if (udata)
         if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
             (udata->inlen >= sizeof(ucmd)) &&
             (udata->outlen >= sizeof(resp)) &&
@@ -836,13 +836,13 @@ err_mtt:
     hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

 err_buf:
-    if (ib_pd->uobject)
+    if (hr_qp->umem)
         ib_umem_release(hr_qp->umem);
     else
         hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);

 err_db:
-    if (!ib_pd->uobject && hns_roce_qp_has_rq(init_attr) &&
+    if (!udata && hns_roce_qp_has_rq(init_attr) &&
         (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
         hns_roce_free_db(hr_dev, &hr_qp->rdb);

@@ -888,7 +888,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
     }
     case IB_QPT_GSI: {
         /* Userspace is not allowed to create special QPs: */
-        if (pd->uobject) {
+        if (udata) {
             dev_err(dev, "not support usr space GSI\n");
             return ERR_PTR(-EINVAL);
         }
@@ -379,7 +379,7 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
     srq->event = hns_roce_ib_srq_event;
     srq->ibsrq.ext.xrc.srq_num = srq->srqn;

-    if (pd->uobject) {
+    if (udata) {
         if (ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) {
             ret = -EFAULT;
             goto err_wrid;
@@ -673,28 +673,26 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
             goto error;
         }
         iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
-        if (ibpd->uobject && ibpd->uobject->context) {
-            iwqp->user_mode = 1;
-            ucontext = to_ucontext(ibpd->uobject->context);
+        iwqp->user_mode = 1;
+        ucontext = to_ucontext(ibpd->uobject->context);

-            if (req.user_wqe_buffers) {
-                struct i40iw_pbl *iwpbl;
+        if (req.user_wqe_buffers) {
+            struct i40iw_pbl *iwpbl;

-                spin_lock_irqsave(
-                    &ucontext->qp_reg_mem_list_lock, flags);
-                iwpbl = i40iw_get_pbl(
-                    (unsigned long)req.user_wqe_buffers,
-                    &ucontext->qp_reg_mem_list);
-                spin_unlock_irqrestore(
-                    &ucontext->qp_reg_mem_list_lock, flags);
+            spin_lock_irqsave(
+                &ucontext->qp_reg_mem_list_lock, flags);
+            iwpbl = i40iw_get_pbl(
+                (unsigned long)req.user_wqe_buffers,
+                &ucontext->qp_reg_mem_list);
+            spin_unlock_irqrestore(
+                &ucontext->qp_reg_mem_list_lock, flags);

-                if (!iwpbl) {
-                    err_code = -ENODATA;
-                    i40iw_pr_err("no pbl info\n");
-                    goto error;
-                }
-                memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl));
-            }
+            if (!iwpbl) {
+                err_code = -ENODATA;
+                i40iw_pr_err("no pbl info\n");
+                goto error;
+            }
+            memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl));
         }
         err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
     } else {
@@ -768,7 +766,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
     iwdev->qp_table[qp_num] = iwqp;
     i40iw_add_pdusecount(iwqp->iwpd);
     i40iw_add_devusecount(iwdev);
-    if (ibpd->uobject && udata) {
+    if (udata) {
         memset(&uresp, 0, sizeof(uresp));
         uresp.actual_sq_size = sq_size;
         uresp.actual_rq_size = rq_size;
@@ -2092,7 +2090,8 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr)
     ib_umem_release(iwmr->region);

     if (iwmr->type != IW_MEMREG_TYPE_MEM) {
-        if (ibpd->uobject) {
+        /* region is released. only test for userness. */
+        if (iwmr->region) {
             struct i40iw_ucontext *ucontext;

             ucontext = to_ucontext(ibpd->uobject->context);
@@ -323,7 +323,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
 }

 static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
-               int is_user, int has_rq, struct mlx4_ib_qp *qp,
+               bool is_user, int has_rq, struct mlx4_ib_qp *qp,
                u32 inl_recv_sz)
 {
     /* Sanity check RQ size before proceeding */
@@ -942,7 +942,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
         qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);


-    if (pd->uobject) {
+    if (udata) {
         union {
             struct mlx4_ib_create_qp qp;
             struct mlx4_ib_create_wq wq;
@@ -991,7 +991,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
             qp->flags |= MLX4_IB_QP_SCATTER_FCS;
     }

-    err = set_rq_size(dev, &init_attr->cap, !!pd->uobject,
+    err = set_rq_size(dev, &init_attr->cap, udata,
               qp_has_rq(init_attr), qp, qp->inl_recv_sz);
     if (err)
         goto err;
@@ -1043,7 +1043,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
         }
         qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
     } else {
-        err = set_rq_size(dev, &init_attr->cap, !!pd->uobject,
+        err = set_rq_size(dev, &init_attr->cap, udata,
                   qp_has_rq(init_attr), qp, 0);
         if (err)
             goto err;
@@ -1189,7 +1189,7 @@ err_proxy:
     if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
         free_proxy_bufs(pd->device, qp);
 err_wrid:
-    if (pd->uobject) {
+    if (udata) {
         if (qp_has_rq(init_attr))
             mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
     } else {
@@ -1201,13 +1201,13 @@ err_mtt:
     mlx4_mtt_cleanup(dev->dev, &qp->mtt);

 err_buf:
-    if (pd->uobject)
+    if (qp->umem)
         ib_umem_release(qp->umem);
     else
         mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);

 err_db:
-    if (!pd->uobject && qp_has_rq(init_attr))
+    if (!udata && qp_has_rq(init_attr))
         mlx4_db_free(dev->dev, &qp->db);

 err:
@@ -1332,7 +1332,7 @@ static void destroy_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
 }

 static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
-                  enum mlx4_ib_source_type src, int is_user)
+                  enum mlx4_ib_source_type src, bool is_user)
 {
     struct mlx4_ib_cq *send_cq, *recv_cq;
     unsigned long flags;
@@ -1612,7 +1612,7 @@ static int _mlx4_ib_destroy_qp(struct ib_qp *qp)
         struct mlx4_ib_pd *pd;

         pd = get_pd(mqp);
-        destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, !!pd->ibpd.uobject);
+        destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, qp->uobject);
     }

     if (is_sqp(dev, mqp))
@@ -4044,7 +4044,7 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
     struct mlx4_ib_create_wq ucmd;
     int err, required_cmd_sz;

-    if (!(udata && pd->uobject))
+    if (!udata)
         return ERR_PTR(-EINVAL);

     required_cmd_sz = offsetof(typeof(ucmd), comp_mask) +
@@ -105,7 +105,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,

     buf_size = srq->msrq.max * desc_size;

-    if (pd->uobject) {
+    if (udata) {
         struct mlx4_ib_create_srq ucmd;

         if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
@@ -191,7 +191,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
     srq->msrq.event = mlx4_ib_srq_event;
     srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

-    if (pd->uobject)
+    if (udata)
         if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) {
             err = -EFAULT;
             goto err_wrid;
@@ -202,7 +202,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
     return &srq->ibsrq;

 err_wrid:
-    if (pd->uobject)
+    if (udata)
         mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
     else
         kvfree(srq->wrid);
@@ -211,13 +211,13 @@ err_mtt:
     mlx4_mtt_cleanup(dev->dev, &srq->mtt);

 err_buf:
-    if (pd->uobject)
+    if (srq->umem)
         ib_umem_release(srq->umem);
     else
         mlx4_buf_free(dev->dev, buf_size, &srq->buf);

 err_db:
-    if (!pd->uobject)
+    if (!udata)
         mlx4_db_free(dev->dev, &srq->db);

 err_srq:
@@ -1904,7 +1904,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
         qp->flags |= MLX5_IB_QP_CVLAN_STRIPPING;
     }

-    if (pd && pd->uobject) {
+    if (udata) {
         if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
             mlx5_ib_dbg(dev, "copy failed\n");
             return -EFAULT;
@@ -1986,14 +1986,14 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,

     qp->has_rq = qp_has_rq(init_attr);
     err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
-              qp, (pd && pd->uobject) ? &ucmd : NULL);
+              qp, udata ? &ucmd : NULL);
     if (err) {
         mlx5_ib_dbg(dev, "err %d\n", err);
         return err;
     }

     if (pd) {
-        if (pd->uobject) {
+        if (udata) {
             __u32 max_wqes =
                 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
             mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
@@ -2064,7 +2064,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
     if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
         configure_responder_scat_cqe(init_attr, qpc);
         configure_requester_scat_cqe(dev, init_attr,
-                         (pd && pd->uobject) ? &ucmd : NULL,
+                         udata ? &ucmd : NULL,
                          qpc);
     }

@@ -2504,7 +2504,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
     dev = to_mdev(pd->device);

     if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
-        if (!pd->uobject) {
+        if (!udata) {
             mlx5_ib_dbg(dev, "Raw Packet QP is not supported for kernel consumers\n");
             return ERR_PTR(-EINVAL);
         } else if (!to_mucontext(pd->uobject->context)->cqe_version) {
@@ -260,14 +260,14 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
     }
     in.type = init_attr->srq_type;

-    if (pd->uobject)
+    if (udata)
         err = create_srq_user(pd, srq, &in, udata, buf_size);
     else
         err = create_srq_kernel(dev, srq, &in, buf_size);

     if (err) {
         mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
-                 pd->uobject ? "user" : "kernel", err);
+                 udata ? "user" : "kernel", err);
         goto err_srq;
     }

@@ -312,7 +312,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
     srq->msrq.event = mlx5_ib_srq_event;
     srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

-    if (pd->uobject)
+    if (udata)
         if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
             mlx5_ib_dbg(dev, "copy to user failed\n");
             err = -EFAULT;
@@ -327,7 +327,7 @@ err_core:
     mlx5_cmd_destroy_srq(dev, &srq->msrq);

 err_usr_kern_srq:
-    if (pd->uobject)
+    if (udata)
         destroy_srq_user(pd, srq);
     else
         destroy_srq_kernel(dev, srq);
@@ -510,7 +510,8 @@ int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent
 void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe);

 int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
-            struct ib_srq_attr *attr, struct mthca_srq *srq);
+            struct ib_srq_attr *attr, struct mthca_srq *srq,
+            struct ib_udata *udata);
 void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
 int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
              enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
@@ -547,7 +548,8 @@ int mthca_alloc_qp(struct mthca_dev *dev,
            enum ib_qp_type type,
            enum ib_sig_type send_policy,
            struct ib_qp_cap *cap,
-           struct mthca_qp *qp);
+           struct mthca_qp *qp,
+           struct ib_udata *udata);
 int mthca_alloc_sqp(struct mthca_dev *dev,
             struct mthca_pd *pd,
             struct mthca_cq *send_cq,
@@ -556,7 +558,8 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
             struct ib_qp_cap *cap,
             int qpn,
             int port,
-            struct mthca_sqp *sqp);
+            struct mthca_sqp *sqp,
+            struct ib_udata *udata);
 void mthca_free_qp(struct mthca_dev *dev, struct mthca_qp *qp);
 int mthca_create_ah(struct mthca_dev *dev,
             struct mthca_pd *pd,
@@ -455,7 +455,7 @@ static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
     if (!srq)
         return ERR_PTR(-ENOMEM);

-    if (pd->uobject) {
+    if (udata) {
         context = to_mucontext(pd->uobject->context);

         if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
@@ -475,9 +475,9 @@ static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
     }

     err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
-                  &init_attr->attr, srq);
+                  &init_attr->attr, srq, udata);

-    if (err && pd->uobject)
+    if (err && udata)
         mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
                     context->db_tab, ucmd.db_index);

@@ -537,7 +537,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
     if (!qp)
         return ERR_PTR(-ENOMEM);

-    if (pd->uobject) {
+    if (udata) {
         context = to_mucontext(pd->uobject->context);

         if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
@@ -574,9 +574,9 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
                      to_mcq(init_attr->send_cq),
                      to_mcq(init_attr->recv_cq),
                      init_attr->qp_type, init_attr->sq_sig_type,
-                     &init_attr->cap, qp);
+                     &init_attr->cap, qp, udata);

-        if (err && pd->uobject) {
+        if (err && udata) {
             context = to_mucontext(pd->uobject->context);

             mthca_unmap_user_db(to_mdev(pd->device),
@@ -596,7 +596,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
     case IB_QPT_GSI:
     {
         /* Don't allow userspace to create special QPs */
-        if (pd->uobject)
+        if (udata)
             return ERR_PTR(-EINVAL);

         qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
@@ -610,7 +610,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
                    to_mcq(init_attr->recv_cq),
                    init_attr->sq_sig_type, &init_attr->cap,
                    qp->ibqp.qp_num, init_attr->port_num,
-                   to_msqp(qp));
+                   to_msqp(qp), udata);
         break;
     }
     default:
@@ -981,7 +981,8 @@ static void mthca_adjust_qp_caps(struct mthca_dev *dev,
  */
 static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
                    struct mthca_pd *pd,
-                   struct mthca_qp *qp)
+                   struct mthca_qp *qp,
+                   struct ib_udata *udata)
 {
     int size;
     int err = -ENOMEM;
@@ -1048,7 +1049,7 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
      * allocate anything. All we need is to calculate the WQE
      * sizes and the send_wqe_offset, so we're done now.
      */
-    if (pd->ibpd.uobject)
+    if (udata)
         return 0;

     size = PAGE_ALIGN(qp->send_wqe_offset +
@@ -1155,7 +1156,8 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
                  struct mthca_cq *send_cq,
                  struct mthca_cq *recv_cq,
                  enum ib_sig_type send_policy,
-                 struct mthca_qp *qp)
+                 struct mthca_qp *qp,
+                 struct ib_udata *udata)
 {
     int ret;
     int i;
@@ -1178,7 +1180,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
     if (ret)
         return ret;

-    ret = mthca_alloc_wqe_buf(dev, pd, qp);
+    ret = mthca_alloc_wqe_buf(dev, pd, qp, udata);
     if (ret) {
         mthca_unmap_memfree(dev, qp);
         return ret;
@@ -1191,7 +1193,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
      * will be allocated and buffers will be initialized in
      * userspace.
      */
-    if (pd->ibpd.uobject)
+    if (udata)
         return 0;

     ret = mthca_alloc_memfree(dev, qp);
@@ -1285,7 +1287,8 @@ int mthca_alloc_qp(struct mthca_dev *dev,
            enum ib_qp_type type,
            enum ib_sig_type send_policy,
            struct ib_qp_cap *cap,
-           struct mthca_qp *qp)
+           struct mthca_qp *qp,
+           struct ib_udata *udata)
 {
     int err;

@@ -1308,7 +1311,7 @@ int mthca_alloc_qp(struct mthca_dev *dev,
     qp->port = 0;

     err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
-                    send_policy, qp);
+                    send_policy, qp, udata);
     if (err) {
         mthca_free(&dev->qp_table.alloc, qp->qpn);
         return err;
@@ -1360,7 +1363,8 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
            struct ib_qp_cap *cap,
            int qpn,
            int port,
-           struct mthca_sqp *sqp)
+           struct mthca_sqp *sqp,
+           struct ib_udata *udata)
 {
     u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
     int err;
@@ -1391,7 +1395,7 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
     sqp->qp.transport = MLX;

     err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
-                    send_policy, &sqp->qp);
+                    send_policy, &sqp->qp, udata);
     if (err)
         goto err_out_free;

@@ -95,7 +95,8 @@ static inline int *wqe_to_link(void *wqe)
 static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
                      struct mthca_pd *pd,
                      struct mthca_srq *srq,
-                     struct mthca_tavor_srq_context *context)
+                     struct mthca_tavor_srq_context *context,
+                     bool is_user)
 {
     memset(context, 0, sizeof *context);

@@ -103,7 +104,7 @@ static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
     context->state_pd = cpu_to_be32(pd->pd_num);
     context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);

-    if (pd->ibpd.uobject)
+    if (is_user)
         context->uar =
             cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
     else
@@ -113,7 +114,8 @@ static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
 static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
                      struct mthca_pd *pd,
                      struct mthca_srq *srq,
-                     struct mthca_arbel_srq_context *context)
+                     struct mthca_arbel_srq_context *context,
+                     bool is_user)
 {
     int logsize, max;

@@ -129,7 +131,7 @@ static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
     context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
     context->db_index = cpu_to_be32(srq->db_index);
     context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
-    if (pd->ibpd.uobject)
+    if (is_user)
         context->logstride_usrpage |=
             cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
     else
@@ -145,14 +147,14 @@ static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
 }

 static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
-                   struct mthca_srq *srq)
+                   struct mthca_srq *srq, struct ib_udata *udata)
 {
     struct mthca_data_seg *scatter;
     void *wqe;
     int err;
     int i;

-    if (pd->ibpd.uobject)
+    if (udata)
         return 0;

     srq->wrid = kmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
@@ -197,7 +199,8 @@ static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
 }

 int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
-            struct ib_srq_attr *attr, struct mthca_srq *srq)
+            struct ib_srq_attr *attr, struct mthca_srq *srq,
+            struct ib_udata *udata)
 {
     struct mthca_mailbox *mailbox;
     int ds;
@@ -235,7 +238,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
     if (err)
         goto err_out;

-    if (!pd->ibpd.uobject) {
+    if (!udata) {
         srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
                            srq->srqn, &srq->db);
         if (srq->db_index < 0) {
@@ -251,7 +254,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
         goto err_out_db;
     }

-    err = mthca_alloc_srq_buf(dev, pd, srq);
+    err = mthca_alloc_srq_buf(dev, pd, srq, udata);
     if (err)
         goto err_out_mailbox;

@@ -261,9 +264,9 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
     mutex_init(&srq->mutex);

     if (mthca_is_memfree(dev))
-        mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
+        mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf, udata);
     else
-        mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);
+        mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf, udata);

     err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn);

@@ -297,14 +300,14 @@ err_out_free_srq:
     mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);

 err_out_free_buf:
-    if (!pd->ibpd.uobject)
+    if (!udata)
         mthca_free_srq_buf(dev, srq);

 err_out_mailbox:
     mthca_free_mailbox(dev, mailbox);

 err_out_db:
-    if (!pd->ibpd.uobject && mthca_is_memfree(dev))
+    if (!udata && mthca_is_memfree(dev))
         mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);

 err_out_icm:
@@ -1066,7 +1066,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
     }
     if (req.user_qp_buffer)
         nesqp->nesuqp_addr = req.user_qp_buffer;
-    if ((ibpd->uobject) && (ibpd->uobject->context)) {
+    if (udata && (ibpd->uobject->context)) {
         nesqp->user_mode = 1;
         nes_ucontext = to_nesucontext(ibpd->uobject->context);
         if (virt_wqs) {
@@ -1257,7 +1257,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,

     nes_put_cqp_request(nesdev, cqp_request);

-    if (ibpd->uobject) {
+    if (udata) {
         uresp.mmap_sq_db_index = nesqp->mmap_sq_db_index;
         uresp.mmap_rq_db_index = 0;
         uresp.actual_sq_size = sq_size;
@@ -1157,7 +1157,8 @@ static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
 }

 static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
-                  struct ib_qp_init_attr *attrs)
+                  struct ib_qp_init_attr *attrs,
+                  struct ib_udata *udata)
 {
     if ((attrs->qp_type != IB_QPT_GSI) &&
         (attrs->qp_type != IB_QPT_RC) &&
@@ -1205,7 +1206,7 @@ static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
         return -EINVAL;
     }
     /* unprivileged user space cannot create special QP */
-    if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
+    if (udata && attrs->qp_type == IB_QPT_GSI) {
         pr_err
             ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
              __func__, dev->id, attrs->qp_type);
@@ -1362,7 +1363,7 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
     struct ocrdma_create_qp_ureq ureq;
     u16 dpp_credit_lmt, dpp_offset;

-    status = ocrdma_check_qp_params(ibpd, dev, attrs);
+    status = ocrdma_check_qp_params(ibpd, dev, attrs, udata);
     if (status)
         goto gen_err;

@@ -1136,7 +1136,8 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
 }

 static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
-                   struct ib_qp_init_attr *attrs)
+                   struct ib_qp_init_attr *attrs,
+                   struct ib_udata *udata)
 {
     struct qedr_device_attr *qattr = &dev->attr;

@@ -1177,7 +1178,7 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
     }

     /* Unprivileged user space cannot create special QP */
-    if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
+    if (udata && attrs->qp_type == IB_QPT_GSI) {
         DP_ERR(dev,
                "create qp: userspace can't create special QPs of type=0x%x\n",
                attrs->qp_type);
@@ -1540,7 +1541,7 @@ int qedr_destroy_srq(struct ib_srq *ibsrq)
     in_params.srq_id = srq->srq_id;
     dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);

-    if (ibsrq->pd->uobject)
+    if (ibsrq->uobject)
         qedr_free_srq_user_params(srq);
     else
         qedr_free_srq_kernel_params(srq);
@@ -1993,7 +1994,7 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
     DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
          udata ? "user library" : "kernel", pd);

-    rc = qedr_check_qp_attrs(ibpd, dev, attrs);
+    rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata);
     if (rc)
         return ERR_PTR(rc);

@@ -249,7 +249,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
     init_completion(&qp->free);

     qp->state = IB_QPS_RESET;
-    qp->is_kernel = !(pd->uobject && udata);
+    qp->is_kernel = !udata;

     if (!qp->is_kernel) {
         dev_dbg(&dev->pdev->dev,
@@ -111,7 +111,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
     unsigned long flags;
     int ret;

-    if (!(pd->uobject && udata)) {
+    if (!udata) {
         /* No support for kernel clients. */
         dev_warn(&dev->pdev->dev,
              "no shared receive queue support for kernel client\n");
@@ -157,7 +157,7 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);
 int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
              struct ib_qp_init_attr *init,
              struct rxe_create_qp_resp __user *uresp,
-             struct ib_pd *ibpd);
+             struct ib_pd *ibpd, struct ib_udata *udata);

 int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init);

@@ -336,13 +336,14 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
 int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
              struct ib_qp_init_attr *init,
              struct rxe_create_qp_resp __user *uresp,
-             struct ib_pd *ibpd)
+             struct ib_pd *ibpd,
+             struct ib_udata *udata)
 {
     int err;
     struct rxe_cq *rcq = to_rcq(init->recv_cq);
     struct rxe_cq *scq = to_rcq(init->send_cq);
     struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
-    struct ib_ucontext *context = ibpd->uobject ? ibpd->uobject->context : NULL;
+    struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

     rxe_add_ref(pd);
     rxe_add_ref(rcq);
@@ -486,7 +486,7 @@ static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,

     rxe_add_index(qp);

-    err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd);
+    err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd, udata);
     if (err)
         goto err3;

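A recurring complication in the diff above: several drivers (mthca most heavily, also rxe, ocrdma, and qedr) make the user/kernel decision inside internal helpers that previously peeked at pd->ibpd.uobject, so the commit threads the struct ib_udata * down through those helpers' signatures instead. Below is a minimal sketch of that shape under the same assumption as the first sketch; the demo_* names and struct are hypothetical, not any real driver's API:

#include <linux/errno.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct demo_srq {
    u64 *wrid; /* hypothetical kernel-side work-request bookkeeping */
};

/*
 * Internal helper: it now receives udata from its caller instead of
 * testing pd->uobject, so the decision reflects the invocation itself.
 */
static int demo_alloc_srq_buf(struct demo_srq *srq, u32 max_wr,
                              struct ib_udata *udata)
{
    if (udata) /* user space supplies its own buffers */
        return 0;

    srq->wrid = kmalloc_array(max_wr, sizeof(u64), GFP_KERNEL);
    return srq->wrid ? 0 : -ENOMEM;
}

/* Verbs entry point: udata arrives here and is passed straight down. */
static int demo_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *attr,
                           struct ib_udata *udata, struct demo_srq *srq)
{
    return demo_alloc_srq_buf(srq, attr->attr.max_wr, udata);
}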