iommu/dma: return error code from iommu_dma_map_sg()

Return an appropriate error code (-EINVAL or -ENOMEM) from
iommu_dma_map_sg(). If lower-level code returns -ENOMEM, pass it
through; all other errors are coalesced into -EINVAL.

iommu_dma_map_sg_swiotlb() returns -EIO, since its failure comes from
a call that only reports DMA_MAPPING_ERROR, so the specific cause is
unknown.
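
For illustration only (not part of this patch): assuming the companion
dma_map_sgtable() conversion from this series, so that these errnos
propagate up to drivers, a caller could act on them roughly as sketched
below. The helper example_map() and its surrounding setup are
hypothetical.

  #include <linux/dma-mapping.h>

  /* Hypothetical driver-side caller, showing how the errnos are used. */
  static int example_map(struct device *dev, struct sg_table *sgt)
  {
  	int ret;

  	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
  	if (ret == -ENOMEM)
  		return ret;	/* resource exhaustion: may be worth retrying */
  	if (ret)
  		return ret;	/* -EINVAL or -EIO: retrying will not help */

  	/* ... device performs DMA on the mapped scatterlist ... */
  	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
  	return 0;
  }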

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -972,7 +972,7 @@ static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
 
 out_unmap:
 	iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
-	return 0;
+	return -EIO;
 }
 
 /*
@@ -993,11 +993,13 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	dma_addr_t iova;
 	size_t iova_len = 0;
 	unsigned long mask = dma_get_seg_boundary(dev);
+	ssize_t ret;
 	int i;
 
-	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
-	    iommu_deferred_attach(dev, domain))
-		return 0;
+	if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
+		ret = iommu_deferred_attach(dev, domain);
+		goto out;
+	}
 
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
@@ -1045,14 +1047,17 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	}
 
 	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
-	if (!iova)
+	if (!iova) {
+		ret = -ENOMEM;
 		goto out_restore_sg;
+	}
 
 	/*
 	 * We'll leave any physical concatenation to the IOMMU driver's
 	 * implementation - it knows better than we do.
 	 */
-	if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
+	ret = iommu_map_sg_atomic(domain, iova, sg, nents, prot);
+	if (ret < iova_len)
 		goto out_free_iova;
 
 	return __finalise_sg(dev, sg, nents, iova);
@@ -1061,7 +1066,10 @@ out_free_iova:
 	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
 out_restore_sg:
 	__invalidate_sg(sg, nents);
-	return 0;
+out:
+	if (ret != -ENOMEM)
+		return -EINVAL;
+	return ret;
 }
 
 static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,