IB/qib: Remove ib_sg_dma_address() and ib_sg_dma_len() overloads

Remove the overloads for .dma_address and .dma_len.

The removal of these methods is compensated for by changes to
.map_sg that ensure the vanilla sg_dma_address() and sg_dma_len()
return the same values as the equivalent former ib_sg_dma_address()
and ib_sg_dma_len() calls into the driver.
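
For illustration only (not part of the patch), a minimal sketch of the
equivalence the change relies on: the address the removed overload
computed per scatterlist entry is exactly what .map_sg now stores in
sg->dma_address, so the generic accessors return the same results.
The helper name legacy_qib_dma_address() below is hypothetical.

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: what the removed overload used to compute. */
static u64 legacy_qib_dma_address(struct scatterlist *sg)
{
	u64 addr = (u64) page_address(sg_page(sg));

	if (addr)
		addr += sg->offset;
	return addr;
}

/*
 * With .map_sg filling in the scatterlist entry, the stock accessors
 * now yield the values the overloads used to:
 *
 *	sg_dma_address(sg) == legacy_qib_dma_address(sg)
 *	sg_dma_len(sg)     == sg->length
 */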

Suggested-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Tested-by: Vinod Kumar <vinod.kumar@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>

@@ -108,6 +108,10 @@ static int qib_map_sg(struct ib_device *dev, struct scatterlist *sgl,
 			ret = 0;
 			break;
 		}
+		sg->dma_address = addr + sg->offset;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+		sg->dma_length = sg->length;
+#endif
 	}
 	return ret;
 }
@@ -119,21 +123,6 @@ static void qib_unmap_sg(struct ib_device *dev,
 	BUG_ON(!valid_dma_direction(direction));
 }
 
-static u64 qib_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
-{
-	u64 addr = (u64) page_address(sg_page(sg));
-
-	if (addr)
-		addr += sg->offset;
-	return addr;
-}
-
-static unsigned int qib_sg_dma_len(struct ib_device *dev,
-				   struct scatterlist *sg)
-{
-	return sg->length;
-}
-
 static void qib_sync_single_for_cpu(struct ib_device *dev, u64 addr,
 				    size_t size, enum dma_data_direction dir)
 {
@@ -173,8 +162,6 @@ struct ib_dma_mapping_ops qib_dma_mapping_ops = {
 	.unmap_page = qib_dma_unmap_page,
 	.map_sg = qib_map_sg,
 	.unmap_sg = qib_unmap_sg,
-	.dma_address = qib_sg_dma_address,
-	.dma_len = qib_sg_dma_len,
 	.sync_single_for_cpu = qib_sync_single_for_cpu,
 	.sync_single_for_device = qib_sync_single_for_device,
 	.alloc_coherent = qib_dma_alloc_coherent,