RDMA/mana_ib: Use virtual address in dma regions for MRs
Introduce mana_ib_create_dma_region() to create dma regions with iova
for MRs. It allows creating MRs with any page offset. Previously,
only page-aligned addresses worked.
For dma regions that must have a zero dma offset (e.g., for queues),
mana_ib_create_zero_offset_dma_region() is added.
To get the zero offset, ib_umem_find_best_pgoff() is used with zero
pgoff_bitmask.
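For illustration, a minimal standalone C sketch of why a zero pgoff_bitmask forces a
zero dma offset (a hypothetical stand-in for the page-size search, not the kernel
helpers themselves): a candidate page size is accepted only when the low bits of the
start address are covered by the mask, so a zero mask admits only page sizes at which
the region begins exactly on a page boundary.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the core alignment test: return the largest
     * page size from 'bitmap' such that the offset of 'start' within that
     * page is covered by 'pgoff_bitmask'.  A zero mask forces a zero offset.
     */
    static uint64_t best_pgsz(uint64_t start, uint64_t bitmap,
                              uint64_t pgoff_bitmask)
    {
            uint64_t pgsz;

            for (pgsz = 1ULL << 63; pgsz; pgsz >>= 1) {     /* largest first */
                    if (!(bitmap & pgsz))
                            continue;
                    if (((start & (pgsz - 1)) & ~pgoff_bitmask) == 0)
                            return pgsz;
            }
            return 0;       /* no acceptable page size */
    }

    int main(void)
    {
            uint64_t bm = (1ULL << 12) | (1ULL << 21);      /* 4K and 2M */

            /* 0x201000 is 4K- but not 2M-aligned: with a zero mask, 4K wins */
            printf("0x%llx\n", (unsigned long long)best_pgsz(0x201000, bm, 0));
            return 0;
    }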
Fixes: 0266a17763 ("RDMA/mana_ib: Add a driver for Microsoft Azure Network Adapter")
Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
Link: https://lore.kernel.org/r/1709560361-26393-3-git-send-email-kotaranov@linux.microsoft.com
Reviewed-by: Zhu Yanjun <yanjun.zhu@linux.dev>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
commit 2d5c008157
parent e02497fb65
drivers/infiniband/hw/mana/cq.c

@@ -48,7 +48,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		return err;
 	}
 
-	err = mana_ib_gd_create_dma_region(mdev, cq->umem, &cq->gdma_region);
+	err = mana_ib_create_zero_offset_dma_region(mdev, cq->umem, &cq->gdma_region);
 	if (err) {
 		ibdev_dbg(ibdev,
 			  "Failed to create dma region for create cq, %d\n",
@@ -57,7 +57,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	}
 
 	ibdev_dbg(ibdev,
-		  "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
+		  "create_dma_region ret %d gdma_region 0x%llx\n",
 		  err, cq->gdma_region);
 
 	/*
drivers/infiniband/hw/mana/main.c

@@ -301,8 +301,8 @@ mana_ib_gd_add_dma_region(struct mana_ib_dev *dev, struct gdma_context *gc,
 	return 0;
 }
 
-int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
-				 mana_handle_t *gdma_region)
+static int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+					mana_handle_t *gdma_region, unsigned long page_sz)
 {
 	struct gdma_dma_region_add_pages_req *add_req = NULL;
 	size_t num_pages_processed = 0, num_pages_to_handle;
@@ -314,7 +314,6 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
 	size_t max_pgs_create_cmd;
 	struct gdma_context *gc;
 	size_t num_pages_total;
-	unsigned long page_sz;
 	unsigned int tail = 0;
 	u64 *page_addr_list;
 	void *request_buf;
@@ -323,12 +322,6 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
 	gc = mdev_to_gc(dev);
 	hwc = gc->hwc.driver_data;
 
-	/* Hardware requires dma region to align to chosen page size */
-	page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, 0);
-	if (!page_sz) {
-		ibdev_dbg(&dev->ib_dev, "failed to find page size.\n");
-		return -ENOMEM;
-	}
 	num_pages_total = ib_umem_num_dma_blocks(umem, page_sz);
 
 	max_pgs_create_cmd =
@@ -414,6 +407,35 @@ out:
 	return err;
 }
 
+int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+			      mana_handle_t *gdma_region, u64 virt)
+{
+	unsigned long page_sz;
+
+	page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, virt);
+	if (!page_sz) {
+		ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
+		return -EINVAL;
+	}
+
+	return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
+}
+
+int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+					  mana_handle_t *gdma_region)
+{
+	unsigned long page_sz;
+
+	/* Hardware requires dma region to align to chosen page size */
+	page_sz = ib_umem_find_best_pgoff(umem, PAGE_SZ_BM, 0);
+	if (!page_sz) {
+		ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
+		return -EINVAL;
+	}
+
+	return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
+}
+
 int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev, u64 gdma_region)
 {
 	struct gdma_context *gc = mdev_to_gc(dev);
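For context, the page_sz chosen above then determines how many hardware pages
describe the region via ib_umem_num_dma_blocks(). A minimal standalone sketch of
that arithmetic (mirroring the upstream helper's rounding under stated assumptions;
not code from this patch):

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch of ib_umem_num_dma_blocks()-style counting: the number of
     * pgsz-sized blocks covering [iova, iova + length), with the start
     * rounded down and the end rounded up to block boundaries.
     * Assumes pgsz is a power of two.
     */
    static size_t num_dma_blocks(uint64_t iova, uint64_t length, uint64_t pgsz)
    {
            uint64_t first = iova & ~(pgsz - 1);
            uint64_t last = (iova + length + pgsz - 1) & ~(pgsz - 1);

            return (size_t)((last - first) / pgsz);
    }

For example, num_dma_blocks(0x201100, 0x2000, 0x1000) is 3: an 8 KiB region that
straddles 4 KiB boundaries at both ends needs three blocks.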
drivers/infiniband/hw/mana/mana_ib.h

@@ -160,9 +160,12 @@ static inline struct net_device *mana_ib_get_netdev(struct ib_device *ibdev, u32
 
 int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);
 
-int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
 				 mana_handle_t *gdma_region);
 
+int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+			      mana_handle_t *gdma_region, u64 virt);
+
 int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
 				  mana_handle_t gdma_region);
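A hypothetical caller (illustrative only, not part of the patch) showing how the
two prototypes above divide the work: queue buffers take the zero-offset path,
while user MRs pass their requested iova so the region may start at any page offset.

    /* Hypothetical helper: pick the dma-region variant by buffer kind. */
    static int map_region(struct mana_ib_dev *dev, struct ib_umem *umem,
                          mana_handle_t *region, u64 iova, bool is_queue)
    {
            if (is_queue)
                    /* hardware queues require a dma offset of zero */
                    return mana_ib_create_zero_offset_dma_region(dev, umem, region);

            /* MRs may begin at any offset implied by iova */
            return mana_ib_create_dma_region(dev, umem, region, iova);
    }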
drivers/infiniband/hw/mana/mr.c

@@ -127,7 +127,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
 		goto err_free;
 	}
 
-	err = mana_ib_gd_create_dma_region(dev, mr->umem, &dma_region_handle);
+	err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
 	if (err) {
 		ibdev_dbg(ibdev, "Failed create dma region for user-mr, %d\n",
 			  err);
@@ -135,7 +135,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
 	}
 
 	ibdev_dbg(ibdev,
-		  "mana_ib_gd_create_dma_region ret %d gdma_region %llx\n", err,
+		  "create_dma_region ret %d gdma_region %llx\n", err,
 		  dma_region_handle);
 
 	mr_params.pd_handle = pd->pd_handle;
drivers/infiniband/hw/mana/qp.c

@@ -357,7 +357,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 	}
 	qp->sq_umem = umem;
 
-	err = mana_ib_gd_create_dma_region(mdev, qp->sq_umem,
+	err = mana_ib_create_zero_offset_dma_region(mdev, qp->sq_umem,
 					   &qp->sq_gdma_region);
 	if (err) {
 		ibdev_dbg(&mdev->ib_dev,
@@ -367,7 +367,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 	}
 
 	ibdev_dbg(&mdev->ib_dev,
-		  "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
+		  "create_dma_region ret %d gdma_region 0x%llx\n",
 		  err, qp->sq_gdma_region);
 
 	/* Create a WQ on the same port handle used by the Ethernet */
drivers/infiniband/hw/mana/wq.c

@@ -46,7 +46,7 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
 	wq->wq_buf_size = ucmd.wq_buf_size;
 	wq->rx_object = INVALID_MANA_HANDLE;
 
-	err = mana_ib_gd_create_dma_region(mdev, wq->umem, &wq->gdma_region);
+	err = mana_ib_create_zero_offset_dma_region(mdev, wq->umem, &wq->gdma_region);
 	if (err) {
 		ibdev_dbg(&mdev->ib_dev,
 			  "Failed to create dma region for create wq, %d\n",
@@ -55,7 +55,7 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
 	}
 
 	ibdev_dbg(&mdev->ib_dev,
-		  "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
+		  "create_dma_region ret %d gdma_region 0x%llx\n",
 		  err, wq->gdma_region);
 
 	/* WQ ID is returned at wq_create time, doesn't know the value yet */