IB/core: Add passing an offset into the SG to ib_map_mr_sg
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Steve Wise <swise@opengridcomputing.com>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>

commit ff2ba99365 (parent 0691a286d5)
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1597,6 +1597,7 @@ EXPORT_SYMBOL(ib_set_vf_guid);
  * @mr: memory region
  * @sg: dma mapped scatterlist
  * @sg_nents: number of entries in sg
+ * @sg_offset: offset in bytes into sg
  * @page_size: page vector desired page size
  *
  * Constraints:
@@ -1615,17 +1616,15 @@ EXPORT_SYMBOL(ib_set_vf_guid);
  * After this completes successfully, the memory region
  * is ready for registration.
  */
-int ib_map_mr_sg(struct ib_mr *mr,
-		 struct scatterlist *sg,
-		 int sg_nents,
-		 unsigned int page_size)
+int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
+		 unsigned int sg_offset, unsigned int page_size)
 {
 	if (unlikely(!mr->device->map_mr_sg))
 		return -ENOSYS;
 
 	mr->page_size = page_size;
 
-	return mr->device->map_mr_sg(mr, sg, sg_nents);
+	return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset);
 }
 EXPORT_SYMBOL(ib_map_mr_sg);
 
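With the new signature, the byte offset is applied to the first scatterlist
entry, so a region can be registered starting mid-entry. A minimal sketch of
a call site (mr, sg and nents are invented placeholders, and 512/4096 are
example values, not part of this patch):

	/* Map the SG list, skipping the first 512 bytes of sg[0],
	 * building a page vector out of 4 KiB pages.
	 */
	n = ib_map_mr_sg(mr, sg, nents, 512, 4096);
	if (n < nents)
		return n < 0 ? n : -EINVAL;	/* SG list did not fit the MR */

Callers that need no offset pass 0, as every updated ULP below does.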
@@ -1635,6 +1634,7 @@ EXPORT_SYMBOL(ib_map_mr_sg);
  * @mr: memory region
  * @sgl: dma mapped scatterlist
  * @sg_nents: number of entries in sg
+ * @sg_offset: offset in bytes into sg
  * @set_page: driver page assignment function pointer
  *
  * Core service helper for drivers to convert the largest
@@ -1645,10 +1645,8 @@ EXPORT_SYMBOL(ib_map_mr_sg);
  * Returns the number of sg elements that were assigned to
  * a page vector.
  */
-int ib_sg_to_pages(struct ib_mr *mr,
-		   struct scatterlist *sgl,
-		   int sg_nents,
-		   int (*set_page)(struct ib_mr *, u64))
+int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
+		   unsigned int sg_offset, int (*set_page)(struct ib_mr *, u64))
 {
 	struct scatterlist *sg;
 	u64 last_end_dma_addr = 0;
@@ -1656,12 +1654,12 @@ int ib_sg_to_pages(struct ib_mr *mr,
 	u64 page_mask = ~((u64)mr->page_size - 1);
 	int i, ret;
 
-	mr->iova = sg_dma_address(&sgl[0]);
+	mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
 	mr->length = 0;
 
 	for_each_sg(sgl, sg, sg_nents, i) {
-		u64 dma_addr = sg_dma_address(sg);
-		unsigned int dma_len = sg_dma_len(sg);
+		u64 dma_addr = sg_dma_address(sg) + sg_offset;
+		unsigned int dma_len = sg_dma_len(sg) - sg_offset;
 		u64 end_dma_addr = dma_addr + dma_len;
 		u64 page_addr = dma_addr & page_mask;
 
@@ -1694,6 +1692,8 @@ next_page:
 		mr->length += dma_len;
 		last_end_dma_addr = end_dma_addr;
 		last_page_off = end_dma_addr & ~page_mask;
+
+		sg_offset = 0;
 	}
 
 	return i;
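Note that sg_offset is consumed by the first loop iteration and then cleared
(the new "sg_offset = 0;" at the bottom of the loop), so only the first
entry's address and length are adjusted. A standalone sketch of that
arithmetic (plain userspace C with invented addresses and lengths standing in
for the sg_dma_address()/sg_dma_len() values):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t page_mask = ~((uint64_t)4096 - 1);	/* 4 KiB pages */
		uint64_t dma_address[2] = { 0x10000, 0x20000 };	/* invented */
		unsigned int dma_length[2] = { 8192, 4096 };	/* invented */
		unsigned int sg_offset = 512;

		for (int i = 0; i < 2; i++) {
			uint64_t dma_addr = dma_address[i] + sg_offset;
			unsigned int dma_len = dma_length[i] - sg_offset;

			printf("entry %d: addr=0x%jx len=%u first page=0x%jx\n",
			       i, (uintmax_t)dma_addr, dma_len,
			       (uintmax_t)(dma_addr & page_mask));
			sg_offset = 0;	/* offset applies to the first entry only */
		}
		return 0;
	}

Entry 0 starts at 0x10200 with 7680 bytes; entry 1 is untouched.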
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -783,15 +783,14 @@ static int iwch_set_page(struct ib_mr *ibmr, u64 addr)
 	return 0;
 }
 
-static int iwch_map_mr_sg(struct ib_mr *ibmr,
-			  struct scatterlist *sg,
-			  int sg_nents)
+static int iwch_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+			  int sg_nents, unsigned sg_offset)
 {
 	struct iwch_mr *mhp = to_iwch_mr(ibmr);
 
 	mhp->npages = 0;
 
-	return ib_sg_to_pages(ibmr, sg, sg_nents, iwch_set_page);
+	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, iwch_set_page);
 }
 
 static int iwch_destroy_qp(struct ib_qp *ib_qp)
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -917,9 +917,8 @@ void c4iw_qp_rem_ref(struct ib_qp *qp);
 struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
 			    enum ib_mr_type mr_type,
 			    u32 max_num_sg);
-int c4iw_map_mr_sg(struct ib_mr *ibmr,
-		   struct scatterlist *sg,
-		   int sg_nents);
+int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+		   unsigned int sg_offset);
 int c4iw_dealloc_mw(struct ib_mw *mw);
 struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 			    struct ib_udata *udata);
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -690,15 +690,14 @@ static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
 	return 0;
 }
 
-int c4iw_map_mr_sg(struct ib_mr *ibmr,
-		   struct scatterlist *sg,
-		   int sg_nents)
+int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+		   unsigned int sg_offset)
 {
 	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
 
 	mhp->mpl_len = 0;
 
-	return ib_sg_to_pages(ibmr, sg, sg_nents, c4iw_set_page);
+	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, c4iw_set_page);
 }
 
 int c4iw_dereg_mr(struct ib_mr *ib_mr)
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -1573,12 +1573,13 @@ static int i40iw_set_page(struct ib_mr *ibmr, u64 addr)
  * @sg: scatter gather list for fmr
  * @sg_nents: number of sg pages
  */
-static int i40iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents)
+static int i40iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+			   int sg_nents, unsigned int sg_offset)
 {
 	struct i40iw_mr *iwmr = to_iwmr(ibmr);
 
 	iwmr->npages = 0;
-	return ib_sg_to_pages(ibmr, sg, sg_nents, i40iw_set_page);
+	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, i40iw_set_page);
 }
 
 /**
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -717,9 +717,8 @@ int mlx4_ib_dealloc_mw(struct ib_mw *mw);
 struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
 			       enum ib_mr_type mr_type,
 			       u32 max_num_sg);
-int mlx4_ib_map_mr_sg(struct ib_mr *ibmr,
-		      struct scatterlist *sg,
-		      int sg_nents);
+int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+		      unsigned int sg_offset);
 int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
 int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
 struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -528,9 +528,8 @@ static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
 	return 0;
 }
 
-int mlx4_ib_map_mr_sg(struct ib_mr *ibmr,
-		      struct scatterlist *sg,
-		      int sg_nents)
+int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+		      unsigned int sg_offset)
 {
 	struct mlx4_ib_mr *mr = to_mmr(ibmr);
 	int rc;
@@ -541,7 +540,7 @@ int mlx4_ib_map_mr_sg(struct ib_mr *ibmr,
 				   sizeof(u64) * mr->max_pages,
 				   DMA_TO_DEVICE);
 
-	rc = ib_sg_to_pages(ibmr, sg, sg_nents, mlx4_set_page);
+	rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);
 
 	ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
 				      sizeof(u64) * mr->max_pages,
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -712,9 +712,8 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
 struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
 			       enum ib_mr_type mr_type,
 			       u32 max_num_sg);
-int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
-		      struct scatterlist *sg,
-		      int sg_nents);
+int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+		      unsigned int sg_offset);
 int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
 			const struct ib_mad_hdr *in, size_t in_mad_size,
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1751,24 +1751,27 @@ done:
 static int
 mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
 		   struct scatterlist *sgl,
-		   unsigned short sg_nents)
+		   unsigned short sg_nents,
+		   unsigned int sg_offset)
 {
 	struct scatterlist *sg = sgl;
 	struct mlx5_klm *klms = mr->descs;
 	u32 lkey = mr->ibmr.pd->local_dma_lkey;
 	int i;
 
-	mr->ibmr.iova = sg_dma_address(sg);
+	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
 	mr->ibmr.length = 0;
 	mr->ndescs = sg_nents;
 
 	for_each_sg(sgl, sg, sg_nents, i) {
 		if (unlikely(i > mr->max_descs))
 			break;
-		klms[i].va = cpu_to_be64(sg_dma_address(sg));
-		klms[i].bcount = cpu_to_be32(sg_dma_len(sg));
+		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
+		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
 		klms[i].key = cpu_to_be32(lkey);
 		mr->ibmr.length += sg_dma_len(sg);
+
+		sg_offset = 0;
 	}
 
 	return i;
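As in ib_sg_to_pages(), the offset is consumed by the first KLM and then
cleared: with the invented values sg_offset = 512 and a first entry of 8192
bytes at DMA address 0x10000, the first KLM would get va 0x10200 and bcount
7680, while every following KLM is built unchanged.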
@@ -1788,9 +1791,8 @@ static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
 	return 0;
 }
 
-int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
-		      struct scatterlist *sg,
-		      int sg_nents)
+int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+		      unsigned int sg_offset)
 {
 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
 	int n;
@@ -1802,9 +1804,10 @@ int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
 				   DMA_TO_DEVICE);
 
 	if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
-		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents);
+		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
 	else
-		n = ib_sg_to_pages(ibmr, sg, sg_nents, mlx5_set_page);
+		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
+				mlx5_set_page);
 
 	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
 				      mr->desc_size * mr->max_descs,
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -402,15 +402,14 @@ static int nes_set_page(struct ib_mr *ibmr, u64 addr)
 	return 0;
 }
 
-static int nes_map_mr_sg(struct ib_mr *ibmr,
-			 struct scatterlist *sg,
-			 int sg_nents)
+static int nes_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+			 int sg_nents, unsigned int sg_offset)
 {
 	struct nes_mr *nesmr = to_nesmr(ibmr);
 
 	nesmr->npages = 0;
 
-	return ib_sg_to_pages(ibmr, sg, sg_nents, nes_set_page);
+	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, nes_set_page);
 }
 
 /**
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -3081,13 +3081,12 @@ static int ocrdma_set_page(struct ib_mr *ibmr, u64 addr)
 	return 0;
 }
 
-int ocrdma_map_mr_sg(struct ib_mr *ibmr,
-		     struct scatterlist *sg,
-		     int sg_nents)
+int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+		     unsigned int sg_offset)
 {
 	struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);
 
 	mr->npages = 0;
 
-	return ib_sg_to_pages(ibmr, sg, sg_nents, ocrdma_set_page);
+	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, ocrdma_set_page);
 }
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -122,8 +122,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length,
 struct ib_mr *ocrdma_alloc_mr(struct ib_pd *pd,
 			      enum ib_mr_type mr_type,
 			      u32 max_num_sg);
-int ocrdma_map_mr_sg(struct ib_mr *ibmr,
-		     struct scatterlist *sg,
-		     int sg_nents);
+int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+		     unsigned sg_offset);
 
 #endif /* __OCRDMA_VERBS_H__ */
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -236,7 +236,7 @@ int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
 	page_vec->npages = 0;
 	page_vec->fake_mr.page_size = SIZE_4K;
 	plen = ib_sg_to_pages(&page_vec->fake_mr, mem->sg,
-			      mem->size, iser_set_page);
+			      mem->size, 0, iser_set_page);
 	if (unlikely(plen < mem->size)) {
 		iser_err("page vec too short to hold this SG\n");
 		iser_data_buf_dump(mem, device->ib_device);
@@ -446,7 +446,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
 
 	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 
-	n = ib_map_mr_sg(mr, mem->sg, mem->size, SIZE_4K);
+	n = ib_map_mr_sg(mr, mem->sg, mem->size, 0, SIZE_4K);
 	if (unlikely(n != mem->size)) {
 		iser_err("failed to map sg (%d/%d)\n",
 			 n, mem->size);
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -2461,7 +2461,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
 		wr = &inv_wr;
 	}
 
-	n = ib_map_mr_sg(mr, mem->sg, mem->nents, PAGE_SIZE);
+	n = ib_map_mr_sg(mr, mem->sg, mem->nents, 0, PAGE_SIZE);
 	if (unlikely(n != mem->nents)) {
 		isert_err("failed to map mr sg (%d/%d)\n",
 			  n, mem->nents);
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1329,7 +1329,7 @@ static int srp_map_finish_fr(struct srp_map_state *state,
 	rkey = ib_inc_rkey(desc->mr->rkey);
 	ib_update_fast_reg_key(desc->mr, rkey);
 
-	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, dev->mr_page_size);
+	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, 0, dev->mr_page_size);
 	if (unlikely(n < 0))
 		return n;
 
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1827,7 +1827,8 @@ struct ib_device {
 						      u32 max_num_sg);
 	int                        (*map_mr_sg)(struct ib_mr *mr,
 						struct scatterlist *sg,
-						int sg_nents);
+						int sg_nents,
+						unsigned sg_offset);
 	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
 					       enum ib_mw_type type,
 					       struct ib_udata *udata);
@@ -3111,29 +3112,23 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
 					    u16 pkey, const union ib_gid *gid,
 					    const struct sockaddr *addr);
 
-int ib_map_mr_sg(struct ib_mr *mr,
-		 struct scatterlist *sg,
-		 int sg_nents,
-		 unsigned int page_size);
+int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
+		 unsigned int sg_offset, unsigned int page_size);
 
 static inline int
-ib_map_mr_sg_zbva(struct ib_mr *mr,
-		  struct scatterlist *sg,
-		  int sg_nents,
-		  unsigned int page_size)
+ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
+		  unsigned int sg_offset, unsigned int page_size)
 {
 	int n;
 
-	n = ib_map_mr_sg(mr, sg, sg_nents, page_size);
+	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
 	mr->iova = 0;
 
 	return n;
 }
 
-int ib_sg_to_pages(struct ib_mr *mr,
-		   struct scatterlist *sgl,
-		   int sg_nents,
-		   int (*set_page)(struct ib_mr *, u64));
+int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
+		   unsigned int sg_offset, int (*set_page)(struct ib_mr *, u64));
 
 void ib_drain_rq(struct ib_qp *qp);
 void ib_drain_sq(struct ib_qp *qp);
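Taken together, the two ib_verbs.h hunks above define the contract a provider
now implements. A hypothetical driver following the same pattern as the
cxgb3/cxgb4/nes changes in this patch (the foo_ names and struct foo_mr are
invented for illustration):

	static int foo_set_page(struct ib_mr *ibmr, u64 addr)
	{
		struct foo_mr *mr = to_foo_mr(ibmr);

		if (mr->npages == mr->max_pages)
			return -ENOMEM;		/* page vector is full */

		mr->pages[mr->npages++] = addr;
		return 0;
	}

	static int foo_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
				 int sg_nents, unsigned sg_offset)
	{
		struct foo_mr *mr = to_foo_mr(ibmr);

		mr->npages = 0;

		/* the core walks the SG list and calls foo_set_page per page */
		return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, foo_set_page);
	}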
--- a/net/rds/ib_frmr.c
+++ b/net/rds/ib_frmr.c
@@ -111,7 +111,7 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
 		cpu_relax();
 	}
 
-	ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len, PAGE_SIZE);
+	ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len, 0, PAGE_SIZE);
 	if (unlikely(ret != ibmr->sg_len))
 		return ret < 0 ? ret : -EINVAL;
 
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -421,7 +421,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 		return -ENOMEM;
 	}
 
-	n = ib_map_mr_sg(mr, frmr->sg, frmr->sg_nents, PAGE_SIZE);
+	n = ib_map_mr_sg(mr, frmr->sg, frmr->sg_nents, 0, PAGE_SIZE);
 	if (unlikely(n != frmr->sg_nents)) {
 		pr_err("RPC: %s: failed to map mr %p (%u/%u)\n",
 		       __func__, frmr->fr_mr, n, frmr->sg_nents);
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -281,7 +281,7 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
 	}
 	atomic_inc(&xprt->sc_dma_used);
 
-	n = ib_map_mr_sg(frmr->mr, frmr->sg, frmr->sg_nents, PAGE_SIZE);
+	n = ib_map_mr_sg(frmr->mr, frmr->sg, frmr->sg_nents, 0, PAGE_SIZE);
 	if (unlikely(n != frmr->sg_nents)) {
 		pr_err("svcrdma: failed to map mr %p (%d/%d elements)\n",
 		       frmr->mr, n, frmr->sg_nents);
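Every caller updated in this patch (iser, isert, srp, rds, xprtrdma and
svcrdma) passes 0 for the new sg_offset argument, and with an offset of 0 the
address and length arithmetic in ib_sg_to_pages() is unchanged, so existing
behavior is preserved; the parameter is plumbing for callers that want to
start a registration partway into the first SG entry.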