IB/iser: Pass the correct number of entries for dma mapped SGL
ib_dma_map_sg() augments the SGL into a 'dma mapped SGL'. This process
may change the number of entries and the lengths of each entry. Code
that touches dma_address is iterating over the 'dma mapped SGL' and
must use the dma_nents value returned from ib_dma_map_sg().
ib_sg_to_pages() and ib_map_mr_sg() use dma_address, so they must be
given dma_nents.

Fixes: 3940588500 ("IB/iser: Port to new fast registration API")
Fixes: bfe066e256 ("IB/iser: Reuse ib_sg_to_pages")
Signed-off-by: Israel Rukshin <israelr@mellanox.com>
Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
Acked-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 790b57f686
commit 57b26497fa
@@ -240,8 +240,8 @@ int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
 	page_vec->npages = 0;
 	page_vec->fake_mr.page_size = SIZE_4K;
 	plen = ib_sg_to_pages(&page_vec->fake_mr, mem->sg,
-			      mem->size, NULL, iser_set_page);
-	if (unlikely(plen < mem->size)) {
+			      mem->dma_nents, NULL, iser_set_page);
+	if (unlikely(plen < mem->dma_nents)) {
 		iser_err("page vec too short to hold this SG\n");
 		iser_data_buf_dump(mem, device->ib_device);
 		iser_dump_page_vec(page_vec);
@@ -448,10 +448,10 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
 
 	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 
-	n = ib_map_mr_sg(mr, mem->sg, mem->size, NULL, SIZE_4K);
-	if (unlikely(n != mem->size)) {
+	n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SIZE_4K);
+	if (unlikely(n != mem->dma_nents)) {
 		iser_err("failed to map sg (%d/%d)\n",
-			 n, mem->size);
+			 n, mem->dma_nents);
 		return n < 0 ? n : -EINVAL;
 	}
 
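For illustration only, a minimal sketch of the rule the commit message describes, as it would appear in generic RDMA consumer code. This sketch is not part of the patch: the helper name, its parameters, and the DMA direction are assumptions; only the ib_dma_map_sg()/ib_map_mr_sg()/ib_dma_unmap_sg() calls and their argument order are the real RDMA core API. The point is that the count returned by ib_dma_map_sg() is the only valid entry count for the dma mapped SGL, so that value, never the original SG entry count, is handed to ib_map_mr_sg().

#include <linux/sizes.h>
#include <rdma/ib_verbs.h>

/*
 * Illustrative sketch only: the helper and its arguments are hypothetical.
 */
static int map_and_register_sketch(struct ib_device *dev, struct ib_mr *mr,
				   struct scatterlist *sg, int orig_nents)
{
	int dma_nents, n;

	/* DMA-map the SGL; the mapping may merge or resize entries. */
	dma_nents = ib_dma_map_sg(dev, sg, orig_nents, DMA_TO_DEVICE);
	if (unlikely(!dma_nents))
		return -EINVAL;

	/*
	 * Iterate the 'dma mapped SGL': pass dma_nents, the value returned
	 * by ib_dma_map_sg(), not orig_nents.
	 */
	n = ib_map_mr_sg(mr, sg, dma_nents, NULL, SZ_4K);
	if (unlikely(n != dma_nents)) {
		ib_dma_unmap_sg(dev, sg, orig_nents, DMA_TO_DEVICE);
		return n < 0 ? n : -EINVAL;
	}

	return 0;
}

The patch applies exactly this distinction inside iser_fast_reg_fmr() and iser_fast_reg_mr(): mem->size is the pre-mapping entry count, mem->dma_nents is the post-mapping count, and only the latter may be used where dma_address is walked.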