@@ -73,11 +73,18 @@ static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     struct dma_attrs *attrs)
 {
-	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__dma_page_cpu_to_dev(page, offset, size, dir);
 	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
 }
 
+static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
+	     struct dma_attrs *attrs)
+{
+	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
 /**
  * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -96,7 +103,7 @@ static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
 				      handle & ~PAGE_MASK, size, dir);
 }
@@ -106,8 +113,7 @@ static void arm_dma_sync_single_for_cpu(struct device *dev,
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-	if (!arch_is_coherent())
-		__dma_page_dev_to_cpu(page, offset, size, dir);
+	__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
 static void arm_dma_sync_single_for_device(struct device *dev,
@@ -115,8 +121,7 @@ static void arm_dma_sync_single_for_device(struct device *dev,
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-	if (!arch_is_coherent())
-		__dma_page_cpu_to_dev(page, offset, size, dir);
+	__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
 static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
@@ -138,6 +143,22 @@ struct dma_map_ops arm_dma_ops = {
 };
 EXPORT_SYMBOL(arm_dma_ops);
 
+static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
+	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
+static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
+				  dma_addr_t handle, struct dma_attrs *attrs);
+
+struct dma_map_ops arm_coherent_dma_ops = {
+	.alloc			= arm_coherent_dma_alloc,
+	.free			= arm_coherent_dma_free,
+	.mmap			= arm_dma_mmap,
+	.get_sgtable		= arm_dma_get_sgtable,
+	.map_page		= arm_coherent_dma_map_page,
+	.map_sg			= arm_dma_map_sg,
+	.set_dma_mask		= arm_dma_set_mask,
+};
+EXPORT_SYMBOL(arm_coherent_dma_ops);
+
 static u64 get_coherent_dma_mask(struct device *dev)
 {
 	u64 mask = (u64)arm_dma_limit;
@@ -586,7 +607,7 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
 
 
 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-			 gfp_t gfp, pgprot_t prot, const void *caller)
+			 gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
 {
 	u64 mask = get_coherent_dma_mask(dev);
 	struct page *page;
@@ -619,7 +640,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	*handle = DMA_ERROR_CODE;
 	size = PAGE_ALIGN(size);
 
-	if (arch_is_coherent() || nommu())
+	if (is_coherent || nommu())
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
 	else if (gfp & GFP_ATOMIC)
 		addr = __alloc_from_pool(size, &page);
@@ -647,7 +668,20 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	if (dma_alloc_from_coherent(dev, size, handle, &memory))
 		return memory;
 
-	return __dma_alloc(dev, size, handle, gfp, prot,
+	return __dma_alloc(dev, size, handle, gfp, prot, false,
+			   __builtin_return_address(0));
+}
+
+static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
+	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+	void *memory;
+
+	if (dma_alloc_from_coherent(dev, size, handle, &memory))
+		return memory;
+
+	return __dma_alloc(dev, size, handle, gfp, prot, true,
 			   __builtin_return_address(0));
 }
 
@@ -684,8 +718,9 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 /*
  * Free a buffer as defined by the above mapping.
  */
-void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
-		  dma_addr_t handle, struct dma_attrs *attrs)
+static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+			   dma_addr_t handle, struct dma_attrs *attrs,
+			   bool is_coherent)
 {
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
 
@@ -694,7 +729,7 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 
 	size = PAGE_ALIGN(size);
 
-	if (arch_is_coherent() || nommu()) {
+	if (is_coherent || nommu()) {
 		__dma_free_buffer(page, size);
 	} else if (__free_from_pool(cpu_addr, size)) {
 		return;
@@ -710,6 +745,18 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 	}
 }
 
+void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+		  dma_addr_t handle, struct dma_attrs *attrs)
+{
+	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
+}
+
+static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
+				  dma_addr_t handle, struct dma_attrs *attrs)
+{
+	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
+}
+
 int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 		 void *cpu_addr, dma_addr_t handle, size_t size,
 		 struct dma_attrs *attrs)
@@ -1012,11 +1059,12 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
 		if (!pages[i])
 			goto error;
 
-		if (order)
+		if (order) {
 			split_page(pages[i], order);
-		j = 1 << order;
-		while (--j)
-			pages[i + j] = pages[i] + j;
+			j = 1 << order;
+			while (--j)
+				pages[i + j] = pages[i] + j;
+		}
 
 		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
 		i += 1 << order;
@@ -1303,7 +1351,8 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
  */
 static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 			  size_t size, dma_addr_t *handle,
-			  enum dma_data_direction dir, struct dma_attrs *attrs)
+			  enum dma_data_direction dir, struct dma_attrs *attrs,
+			  bool is_coherent)
 {
 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
 	dma_addr_t iova, iova_base;
@@ -1322,8 +1371,8 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 		phys_addr_t phys = page_to_phys(sg_page(s));
 		unsigned int len = PAGE_ALIGN(s->offset + s->length);
 
-		if (!arch_is_coherent() &&
-		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+		if (!is_coherent &&
+		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 
 		ret = iommu_map(mapping->domain, iova, phys, len, 0);
@@ -1341,20 +1390,9 @@ fail:
 	return ret;
 }
 
-/**
- * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
- * @dev: valid struct device pointer
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * The scatter gather list elements are merged together (if possible) and
- * tagged with the appropriate dma address and length. They are obtained via
- * sg_dma_{address,length}.
- */
-int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		     enum dma_data_direction dir, struct dma_attrs *attrs)
+static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+			  enum dma_data_direction dir, struct dma_attrs *attrs,
+			  bool is_coherent)
 {
 	struct scatterlist *s = sg, *dma = sg, *start = sg;
 	int i, count = 0;
@@ -1370,7 +1408,7 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 
 		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
 			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
-			    dir, attrs) < 0)
+			    dir, attrs, is_coherent) < 0)
 				goto bad_mapping;
 
 			dma->dma_address += offset;
@@ -1383,7 +1421,8 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		}
 		size += s->length;
 	}
-	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs) < 0)
+	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
+		is_coherent) < 0)
 		goto bad_mapping;
 
 	dma->dma_address += offset;
@@ -1397,6 +1436,76 @@ bad_mapping:
 	return 0;
 }
 
+/**
+ * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of i/o coherent buffers described by scatterlist in streaming
+ * mode for DMA. The scatter gather list elements are merged together (if
+ * possible) and tagged with the appropriate dma address and length. They are
+ * obtained via sg_dma_{address,length}.
+ */
+int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
+}
+
+/**
+ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * The scatter gather list elements are merged together (if possible) and
+ * tagged with the appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ */
+int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
+}
+
+static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
+		bool is_coherent)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i) {
+		if (sg_dma_len(s))
+			__iommu_remove_mapping(dev, sg_dma_address(s),
+					       sg_dma_len(s));
+		if (!is_coherent &&
+		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+			__dma_page_dev_to_cpu(sg_page(s), s->offset,
+					      s->length, dir);
+	}
+}
+
+/**
+ * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations. Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
+}
+
 /**
  * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
  * @dev: valid struct device pointer
@@ -1410,18 +1519,7 @@ bad_mapping:
 void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 			enum dma_data_direction dir, struct dma_attrs *attrs)
 {
-	struct scatterlist *s;
-	int i;
-
-	for_each_sg(sg, s, nents, i) {
-		if (sg_dma_len(s))
-			__iommu_remove_mapping(dev, sg_dma_address(s),
-					       sg_dma_len(s));
-		if (!arch_is_coherent() &&
-		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
-			__dma_page_dev_to_cpu(sg_page(s), s->offset,
-					      s->length, dir);
-	}
+	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
 }
 
 /**
@@ -1438,8 +1536,7 @@ void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i)
-		if (!arch_is_coherent())
-			__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
 
 }
 
@@ -1457,11 +1554,42 @@ void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i)
-		if (!arch_is_coherent())
-			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 }
 
 
+/**
+ * arm_coherent_iommu_map_page
+ * @dev: valid struct device pointer
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Coherent IOMMU aware version of arm_dma_map_page()
+ */
+static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
+	     struct dma_attrs *attrs)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t dma_addr;
+	int ret, len = PAGE_ALIGN(size + offset);
+
+	dma_addr = __alloc_iova(mapping, len);
+	if (dma_addr == DMA_ERROR_CODE)
+		return dma_addr;
+
+	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
+	if (ret < 0)
+		goto fail;
+
+	return dma_addr + offset;
+fail:
+	__free_iova(mapping, dma_addr, len);
+	return DMA_ERROR_CODE;
+}
+
 /**
  * arm_iommu_map_page
  * @dev: valid struct device pointer
@@ -1476,25 +1604,35 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     struct dma_attrs *attrs)
 {
-	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
-	dma_addr_t dma_addr;
-	int ret, len = PAGE_ALIGN(size + offset);
-
-	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__dma_page_cpu_to_dev(page, offset, size, dir);
 
-	dma_addr = __alloc_iova(mapping, len);
-	if (dma_addr == DMA_ERROR_CODE)
-		return dma_addr;
+	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
+}
 
-	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
-	if (ret < 0)
-		goto fail;
+/**
+ * arm_coherent_iommu_unmap_page
+ * @dev: valid struct device pointer
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * Coherent IOMMU aware version of arm_dma_unmap_page()
+ */
+static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = handle & PAGE_MASK;
+	int offset = handle & ~PAGE_MASK;
+	int len = PAGE_ALIGN(size + offset);
 
-	return dma_addr + offset;
-fail:
-	__free_iova(mapping, dma_addr, len);
-	return DMA_ERROR_CODE;
+	if (!iova)
+		return;
+
+	iommu_unmap(mapping->domain, iova, len);
+	__free_iova(mapping, iova, len);
 }
 
 /**
@@ -1519,7 +1657,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 	if (!iova)
 		return;
 
-	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__dma_page_dev_to_cpu(page, offset, size, dir);
 
 	iommu_unmap(mapping->domain, iova, len);
@@ -1537,8 +1675,7 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
 	if (!iova)
 		return;
 
-	if (!arch_is_coherent())
-		__dma_page_dev_to_cpu(page, offset, size, dir);
+	__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
 static void arm_iommu_sync_single_for_device(struct device *dev,
@@ -1572,6 +1709,19 @@ struct dma_map_ops iommu_ops = {
 	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,
 };
 
+struct dma_map_ops iommu_coherent_ops = {
+	.alloc		= arm_iommu_alloc_attrs,
+	.free		= arm_iommu_free_attrs,
+	.mmap		= arm_iommu_mmap_attrs,
+	.get_sgtable	= arm_iommu_get_sgtable,
+
+	.map_page	= arm_coherent_iommu_map_page,
+	.unmap_page	= arm_coherent_iommu_unmap_page,
+
+	.map_sg		= arm_coherent_iommu_map_sg,
+	.unmap_sg	= arm_coherent_iommu_unmap_sg,
+};
+
 /**
  * arm_iommu_create_mapping
  * @bus: pointer to the bus holding the client device (for IOMMU calls)
@@ -1665,7 +1815,7 @@ int arm_iommu_attach_device(struct device *dev,
 	dev->archdata.mapping = mapping;
 	set_dma_ops(dev, &iommu_ops);
 
-	pr_info("Attached IOMMU controller to %s device.\n", dev_name(dev));
+	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
 	return 0;
 }
 
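
A minimal usage sketch (illustration only, not part of the diff above): with
arm_coherent_dma_ops exported, platform code for a bus that is known to snoop
CPU caches could opt a device into the coherent path via set_dma_ops(), so the
streaming API stops issuing __dma_page_cpu_to_dev()/__dma_page_dev_to_cpu()
maintenance for that device. The helper name below is made up for the example,
and it assumes the extern declaration for arm_coherent_dma_ops is visible
through <asm/dma-mapping.h>:

	#include <linux/device.h>
	#include <asm/dma-mapping.h>

	/* hypothetical platform hook, not defined by this patch */
	static void example_use_coherent_dma(struct device *dev)
	{
		/* assumption: dev sits behind a cache-coherent interconnect */
		set_dma_ops(dev, &arm_coherent_dma_ops);
	}

Devices that are not switched over keep arm_dma_ops, and devices attached with
arm_iommu_attach_device() keep iommu_ops, so default behaviour is unchanged.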