RDMA/hns: Optimize qp buffer allocation flow

Encapsulate qp buffer allocation related code into 3 functions:
alloc_qp_buf(), map_wqe_buf() and free_qp_buf().

Link: https://lore.kernel.org/r/1582526258-13825-5-git-send-email-liweihang@huawei.com
Signed-off-by: Xi Wang <wangxi11@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
commit 24c22112b9
parent df83a66e1b
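After this change, hns_roce_create_qp_common() no longer open-codes separate user and kernel WQE buffer paths: it makes a single call to alloc_qp_buf(), which delegates page mapping to map_wqe_buf(), and both the create-path error unwinding and hns_roce_qp_destroy() tear down through free_qp_buf(). The standalone C sketch below mirrors that three-function structure and its goto-based unwinding with mock types and plain malloc()/free() in place of the driver's allocators; every name, size, and field here is an illustrative stand-in, not the driver's actual API.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct hns_roce_qp; not the driver's layout. */
struct mock_qp {
	void *rq_inl_buf;	/* plays the role of rq_inl_buf.wqe_list */
	void *wqe_buf;		/* plays the role of hr_buf / umem */
	void *mtr;		/* plays the role of the attached mtr */
};

/* Stands in for map_wqe_buf(): make the WQE buffer "hardware"-visible. */
static int map_wqe_buf(struct mock_qp *qp)
{
	qp->mtr = malloc(64);
	return qp->mtr ? 0 : -ENOMEM;
}

/* Stands in for alloc_qp_buf(): inline RQ buf, WQE buf, then mapping. */
static int alloc_qp_buf(struct mock_qp *qp, bool rq_inline)
{
	int ret;

	if (rq_inline) {
		qp->rq_inl_buf = malloc(256);
		if (!qp->rq_inl_buf)
			return -ENOMEM;
	}

	qp->wqe_buf = malloc(4096);
	if (!qp->wqe_buf) {
		ret = -ENOMEM;
		goto err_inline;
	}

	ret = map_wqe_buf(qp);
	if (ret)
		goto err_alloc;

	return 0;

err_alloc:
	free(qp->wqe_buf);
	qp->wqe_buf = NULL;
err_inline:
	free(qp->rq_inl_buf);	/* free(NULL) is a no-op */
	qp->rq_inl_buf = NULL;
	return ret;
}

/* Stands in for free_qp_buf(): one teardown for error and destroy paths. */
static void free_qp_buf(struct mock_qp *qp)
{
	free(qp->mtr);
	free(qp->wqe_buf);
	free(qp->rq_inl_buf);
	qp->mtr = qp->wqe_buf = qp->rq_inl_buf = NULL;
}

int main(void)
{
	struct mock_qp qp = { 0 };

	if (alloc_qp_buf(&qp, true))	/* create-path allocation */
		return 1;
	free_qp_buf(&qp);		/* destroy-path teardown */
	printf("qp buffer flow ok\n");
	return 0;
}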
@@ -673,7 +673,6 @@ struct hns_roce_qp {
 	/* this define must less than HNS_ROCE_MAX_BT_REGION */
 #define HNS_ROCE_WQE_REGION_MAX	 3
 	struct hns_roce_buf_region regions[HNS_ROCE_WQE_REGION_MAX];
-	int			region_cnt;
 	int			wqe_bt_pg_shift;
 
 	u32			buff_size;
@@ -767,23 +767,147 @@ static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
 	kfree(hr_qp->rq_inl_buf.wqe_list);
 }
 
+static int map_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+		       u32 page_shift, bool is_user)
+{
+	dma_addr_t *buf_list[ARRAY_SIZE(hr_qp->regions)] = { NULL };
+	struct ib_device *ibdev = &hr_dev->ib_dev;
+	struct hns_roce_buf_region *r;
+	int region_count;
+	int buf_count;
+	int ret;
+	int i;
+
+	region_count = split_wqe_buf_region(hr_dev, hr_qp, hr_qp->regions,
+					    ARRAY_SIZE(hr_qp->regions),
+					    page_shift);
+
+	/* alloc a tmp list to store WQE buffers address */
+	ret = hns_roce_alloc_buf_list(hr_qp->regions, buf_list, region_count);
+	if (ret) {
+		ibdev_err(ibdev, "Failed to alloc WQE buffer list\n");
+		return ret;
+	}
+
+	for (i = 0; i < region_count; i++) {
+		r = &hr_qp->regions[i];
+		if (is_user)
+			buf_count = hns_roce_get_umem_bufs(hr_dev, buf_list[i],
+					r->count, r->offset, hr_qp->umem,
+					page_shift);
+		else
+			buf_count = hns_roce_get_kmem_bufs(hr_dev, buf_list[i],
+					r->count, r->offset, &hr_qp->hr_buf);
+
+		if (buf_count != r->count) {
+			ibdev_err(ibdev, "Failed to get %s WQE buf, expect %d = %d.\n",
+				  is_user ? "user" : "kernel",
+				  r->count, buf_count);
+			ret = -ENOBUFS;
+			goto done;
+		}
+	}
+
+	hr_qp->wqe_bt_pg_shift = calc_wqe_bt_page_shift(hr_dev, hr_qp->regions,
+							region_count);
+	hns_roce_mtr_init(&hr_qp->mtr, PAGE_SHIFT + hr_qp->wqe_bt_pg_shift,
+			  page_shift);
+	ret = hns_roce_mtr_attach(hr_dev, &hr_qp->mtr, buf_list, hr_qp->regions,
+				  region_count);
+	if (ret)
+		ibdev_err(ibdev, "Failed to attach WQE's mtr\n");
+
+	goto done;
+
+	hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
+done:
+	hns_roce_free_buf_list(buf_list, region_count);
+
+	return ret;
+}
+
+static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+			struct ib_qp_init_attr *init_attr,
+			struct ib_udata *udata, unsigned long addr)
+{
+	u32 page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
+	struct ib_device *ibdev = &hr_dev->ib_dev;
+	bool is_rq_buf_inline;
+	int ret;
+
+	is_rq_buf_inline = (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
+			   hns_roce_qp_has_rq(init_attr);
+	if (is_rq_buf_inline) {
+		ret = alloc_rq_inline_buf(hr_qp, init_attr);
+		if (ret) {
+			ibdev_err(ibdev, "Failed to alloc inline RQ buffer\n");
+			return ret;
+		}
+	}
+
+	if (udata) {
+		hr_qp->umem = ib_umem_get(udata, addr, hr_qp->buff_size, 0);
+		if (IS_ERR(hr_qp->umem)) {
+			ret = PTR_ERR(hr_qp->umem);
+			goto err_inline;
+		}
+	} else {
+		ret = hns_roce_buf_alloc(hr_dev, hr_qp->buff_size,
+					 (1 << page_shift) * 2,
+					 &hr_qp->hr_buf, page_shift);
+		if (ret)
+			goto err_inline;
+	}
+
+	ret = map_wqe_buf(hr_dev, hr_qp, page_shift, udata);
+	if (ret)
+		goto err_alloc;
+
+	return 0;
+
+err_inline:
+	if (is_rq_buf_inline)
+		free_rq_inline_buf(hr_qp);
+
+err_alloc:
+	if (udata) {
+		ib_umem_release(hr_qp->umem);
+		hr_qp->umem = NULL;
+	} else {
+		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
+	}
+
+	ibdev_err(ibdev, "Failed to alloc WQE buffer, ret %d.\n", ret);
+
+	return ret;
+}
+
+static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+{
+	hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
+	if (hr_qp->umem) {
+		ib_umem_release(hr_qp->umem);
+		hr_qp->umem = NULL;
+	}
+
+	if (hr_qp->hr_buf.nbufs > 0)
+		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
+
+	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
+	     hr_qp->rq.wqe_cnt)
+		free_rq_inline_buf(hr_qp);
+}
+
 static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 				     struct ib_pd *ib_pd,
 				     struct ib_qp_init_attr *init_attr,
 				     struct ib_udata *udata,
 				     struct hns_roce_qp *hr_qp)
 {
-	dma_addr_t *buf_list[ARRAY_SIZE(hr_qp->regions)] = { NULL };
 	struct device *dev = hr_dev->dev;
 	struct hns_roce_ib_create_qp ucmd;
 	struct hns_roce_ib_create_qp_resp resp = {};
 	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
 		udata, struct hns_roce_ucontext, ibucontext);
-	struct hns_roce_buf_region *r;
-	u32 page_shift;
-	int buf_count;
 	int ret;
-	int i;
 
 	mutex_init(&hr_qp->mutex);
 	spin_lock_init(&hr_qp->sq.lock);
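A note on map_wqe_buf() above: success and failure both exit through the done label, which frees the temporary buf_list either way, so the hns_roce_mtr_cleanup() call sitting after the unconditional goto done is unreachable as written.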
@@ -806,59 +930,18 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 		goto err_out;
 	}
 
-	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
-	    hns_roce_qp_has_rq(init_attr)) {
-		ret = alloc_rq_inline_buf(hr_qp, init_attr);
-		if (ret) {
-			dev_err(dev, "allocate receive inline buffer failed\n");
-			goto err_out;
-		}
-	}
-
-	page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
 	if (udata) {
 		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
 			dev_err(dev, "ib_copy_from_udata error for create qp\n");
 			ret = -EFAULT;
-			goto err_alloc_rq_inline_buf;
+			goto err_out;
 		}
 
 		ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
 						&ucmd);
 		if (ret) {
 			dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
-			goto err_alloc_rq_inline_buf;
+			goto err_out;
 		}
-
-		hr_qp->umem = ib_umem_get(ib_pd->device, ucmd.buf_addr,
-					  hr_qp->buff_size, 0);
-		if (IS_ERR(hr_qp->umem)) {
-			dev_err(dev, "ib_umem_get error for create qp\n");
-			ret = PTR_ERR(hr_qp->umem);
-			goto err_alloc_rq_inline_buf;
-		}
-		hr_qp->region_cnt = split_wqe_buf_region(hr_dev, hr_qp,
-				hr_qp->regions, ARRAY_SIZE(hr_qp->regions),
-				page_shift);
-		ret = hns_roce_alloc_buf_list(hr_qp->regions, buf_list,
-					      hr_qp->region_cnt);
-		if (ret) {
-			dev_err(dev, "alloc buf_list error for create qp\n");
-			goto err_alloc_list;
-		}
-
-		for (i = 0; i < hr_qp->region_cnt; i++) {
-			r = &hr_qp->regions[i];
-			buf_count = hns_roce_get_umem_bufs(hr_dev,
-					buf_list[i], r->count, r->offset,
-					hr_qp->umem, page_shift);
-			if (buf_count != r->count) {
-				dev_err(dev,
-					"get umem buf err, expect %d,ret %d.\n",
-					r->count, buf_count);
-				ret = -ENOBUFS;
-				goto err_get_bufs;
-			}
-		}
 
 		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
@@ -869,7 +952,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 					   &hr_qp->sdb);
 		if (ret) {
 			dev_err(dev, "sq record doorbell map failed!\n");
-			goto err_get_bufs;
+			goto err_out;
 		}
 
 		/* indicate kernel supports sq record db */
@@ -896,13 +979,13 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 		    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
 			dev_err(dev, "init_attr->create_flags error!\n");
 			ret = -EINVAL;
-			goto err_alloc_rq_inline_buf;
+			goto err_out;
 		}
 
 		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
 			dev_err(dev, "init_attr->create_flags error!\n");
 			ret = -EINVAL;
-			goto err_alloc_rq_inline_buf;
+			goto err_out;
 		}
 
 		/* Set SQ size */
@@ -910,7 +993,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 					    hr_qp);
 		if (ret) {
 			dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
-			goto err_alloc_rq_inline_buf;
+			goto err_out;
 		}
 
 		/* QP doorbell register address */
@@ -924,49 +1007,17 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 			ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
 			if (ret) {
 				dev_err(dev, "rq record doorbell alloc failed!\n");
-				goto err_alloc_rq_inline_buf;
+				goto err_out;
 			}
 			*hr_qp->rdb.db_record = 0;
 			hr_qp->rdb_en = 1;
 		}
 
-		/* Allocate QP buf */
-		if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size,
-				       (1 << page_shift) * 2,
-				       &hr_qp->hr_buf, page_shift)) {
-			dev_err(dev, "hns_roce_buf_alloc error!\n");
-			ret = -ENOMEM;
-			goto err_db;
-		}
-		hr_qp->region_cnt = split_wqe_buf_region(hr_dev, hr_qp,
-				hr_qp->regions, ARRAY_SIZE(hr_qp->regions),
-				page_shift);
-		ret = hns_roce_alloc_buf_list(hr_qp->regions, buf_list,
-					      hr_qp->region_cnt);
-		if (ret) {
-			dev_err(dev, "alloc buf_list error for create qp!\n");
-			goto err_alloc_list;
-		}
-
-		for (i = 0; i < hr_qp->region_cnt; i++) {
-			r = &hr_qp->regions[i];
-			buf_count = hns_roce_get_kmem_bufs(hr_dev,
-					buf_list[i], r->count, r->offset,
-					&hr_qp->hr_buf);
-			if (buf_count != r->count) {
-				dev_err(dev,
-					"get kmem buf err, expect %d,ret %d.\n",
-					r->count, buf_count);
-				ret = -ENOBUFS;
-				goto err_get_bufs;
-			}
-		}
-
 		hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64),
 					 GFP_KERNEL);
 		if (ZERO_OR_NULL_PTR(hr_qp->sq.wrid)) {
 			ret = -ENOMEM;
-			goto err_get_bufs;
+			goto err_db;
 		}
 
 		if (hr_qp->rq.wqe_cnt) {
@@ -979,21 +1030,16 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 		}
 	}
 
-	hr_qp->wqe_bt_pg_shift = calc_wqe_bt_page_shift(hr_dev, hr_qp->regions,
-							hr_qp->region_cnt);
-	hns_roce_mtr_init(&hr_qp->mtr, PAGE_SHIFT + hr_qp->wqe_bt_pg_shift,
-			  page_shift);
-	ret = hns_roce_mtr_attach(hr_dev, &hr_qp->mtr, buf_list,
-				  hr_qp->regions, hr_qp->region_cnt);
+	ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
 	if (ret) {
-		dev_err(dev, "mtr attach error for create qp\n");
-		goto err_wrid;
+		ibdev_err(&hr_dev->ib_dev, "Failed to alloc QP buffer\n");
+		goto err_db;
 	}
 
 	ret = alloc_qpn(hr_dev, hr_qp);
 	if (ret) {
 		ibdev_err(&hr_dev->ib_dev, "Failed to alloc QPN\n");
-		goto err_mtr;
+		goto err_buf;
 	}
 
 	ret = alloc_qpc(hr_dev, hr_qp);
@@ -1026,8 +1072,6 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 	atomic_set(&hr_qp->refcount, 1);
 	init_completion(&hr_qp->free);
 
-	hns_roce_free_buf_list(buf_list, hr_qp->region_cnt);
-
 	return 0;
 
 err_store:
@@ -1039,10 +1083,9 @@ err_qpc:
 err_qpn:
 	free_qpn(hr_dev, hr_qp);
 
-err_mtr:
-	hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
+err_buf:
+	free_qp_buf(hr_dev, hr_qp);
 
-err_wrid:
 	if (udata) {
 		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
 		    (udata->outlen >= sizeof(resp)) &&
@@ -1065,24 +1108,11 @@ err_sq_wrid:
 	if (!udata)
 		kfree(hr_qp->sq.wrid);
 
-err_get_bufs:
-	hns_roce_free_buf_list(buf_list, hr_qp->region_cnt);
-
-err_alloc_list:
-	if (!hr_qp->umem)
-		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
-	ib_umem_release(hr_qp->umem);
-
 err_db:
 	if (!udata && hns_roce_qp_has_rq(init_attr) &&
 	    (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
 		hns_roce_free_db(hr_dev, &hr_qp->rdb);
 
-err_alloc_rq_inline_buf:
-	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
-	     hns_roce_qp_has_rq(init_attr))
-		free_rq_inline_buf(hr_qp);
-
 err_out:
 	return ret;
 }
@@ -1098,7 +1128,7 @@ void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 
 	free_qpn(hr_dev, hr_qp);
 
-	hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
+	free_qp_buf(hr_dev, hr_qp);
 
 	if (udata) {
 		struct hns_roce_ucontext *context =
@@ -1115,17 +1145,9 @@ void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 	} else {
 		kfree(hr_qp->sq.wrid);
 		kfree(hr_qp->rq.wrid);
-		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
 		if (hr_qp->rq.wqe_cnt)
 			hns_roce_free_db(hr_dev, &hr_qp->rdb);
 	}
-	ib_umem_release(hr_qp->umem);
-
-	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
-	     hr_qp->rq.wqe_cnt) {
-		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
-		kfree(hr_qp->rq_inl_buf.wqe_list);
-	}
 
 	kfree(hr_qp);
 }
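Design note on the error handling: the create path previously needed five dedicated unwind labels (err_alloc_rq_inline_buf, err_alloc_list, err_get_bufs, err_wrid and err_mtr). After this patch, everything before the buffer allocation jumps straight to err_out, and everything after it unwinds through the single err_buf label, because free_qp_buf() can tell from the QP state itself (a non-NULL umem, hr_buf.nbufs > 0, the RQ-inline capability plus rq.wqe_cnt) which resources actually exist and need freeing.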