dma-mapping: drop the dev argument to arch_sync_dma_for_*

These are pure cache maintenance routines, so drop the unused struct device
argument.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Suggested-by: Daniel Vetter <daniel.vetter@ffwll.ch>

parent e380a0394c
commit 56e35f9c5b
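At a glance, the interface change is that every arch_sync_dma_* hook loses its struct device * parameter. A condensed sketch of the resulting declarations and their no-op fallbacks, lifted from the generic noncoherent-DMA header hunk near the end of this diff (the CONFIG_* symbols are the ones shown there):

/* New prototypes after this change: no struct device argument. Architectures
 * that do not select the matching Kconfig symbol get empty inline stubs.
 */
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(void);
#else
static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif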
@@ -48,8 +48,8 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
  * upper layer functions (in include/linux/dma-mapping.h)
  */
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_TO_DEVICE:
@@ -69,8 +69,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
 	}
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_TO_DEVICE:

@@ -2332,15 +2332,15 @@ void arch_teardown_dma_ops(struct device *dev)
 }
 
 #ifdef CONFIG_SWIOTLB
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	__dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
 			      size, dir);
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	__dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
 			      size, dir);

@@ -70,20 +70,20 @@ static void dma_cache_maint(dma_addr_t handle, size_t size, u32 op)
  * pfn_valid returns true the pages is local and we can use the native
  * dma-direct functions, otherwise we call the Xen specific version.
  */
-void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
-		phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+void xen_dma_sync_for_cpu(dma_addr_t handle, phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	if (pfn_valid(PFN_DOWN(handle)))
-		arch_sync_dma_for_cpu(dev, paddr, size, dir);
+		arch_sync_dma_for_cpu(paddr, size, dir);
 	else if (dir != DMA_TO_DEVICE)
 		dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
 }
 
-void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
-		phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+void xen_dma_sync_for_device(dma_addr_t handle, phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	if (pfn_valid(PFN_DOWN(handle)))
-		arch_sync_dma_for_device(dev, paddr, size, dir);
+		arch_sync_dma_for_device(paddr, size, dir);
 	else if (dir == DMA_FROM_DEVICE)
 		dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
 	else

@@ -13,14 +13,14 @@
 
 #include <asm/cacheflush.h>
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	__dma_map_area(phys_to_virt(paddr), size, dir);
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	__dma_unmap_area(phys_to_virt(paddr), size, dir);
 }

@@ -140,7 +140,7 @@ void __init coherent_mem_init(phys_addr_t start, u32 size)
 		      sizeof(long));
 }
 
-static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
+static void c6x_dma_sync(phys_addr_t paddr, size_t size,
 		enum dma_data_direction dir)
 {
 	BUG_ON(!valid_dma_direction(dir));
@@ -160,14 +160,14 @@ static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
 	}
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
-	return c6x_dma_sync(dev, paddr, size, dir);
+	return c6x_dma_sync(paddr, size, dir);
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
-	return c6x_dma_sync(dev, paddr, size, dir);
+	return c6x_dma_sync(paddr, size, dir);
 }

@@ -58,8 +58,8 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 	cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-			      size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_TO_DEVICE:
@@ -74,8 +74,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
 	}
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-			   size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_TO_DEVICE:

@@ -55,8 +55,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 	gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	void *addr = phys_to_virt(paddr);
 

@@ -73,8 +73,8 @@ __ia64_sync_icache_dcache (pte_t pte)
  * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
  * flush them when they get mapped into an executable vm-area.
  */
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	unsigned long pfn = PHYS_PFN(paddr);
 

@@ -61,8 +61,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 
 #endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t handle,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t handle, size_t size,
+		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_BIDIRECTIONAL:

@@ -15,7 +15,7 @@
 #include <linux/bug.h>
 #include <asm/cacheflush.h>
 
-static void __dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
+static void __dma_sync(phys_addr_t paddr, size_t size,
 		enum dma_data_direction direction)
 {
 	switch (direction) {
@@ -31,14 +31,14 @@ static void __dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
 	}
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
-	__dma_sync(dev, paddr, size, dir);
+	__dma_sync(paddr, size, dir);
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
-	__dma_sync(dev, paddr, size, dir);
+	__dma_sync(paddr, size, dir);
 }

@@ -64,7 +64,7 @@ phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
 	return dma_addr;
 }
 
-void arch_sync_dma_for_cpu_all(struct device *dev)
+void arch_sync_dma_for_cpu_all(void)
 {
 	void __iomem *cbr = BMIPS_GET_CBR();
 	u32 cfg;

@@ -592,7 +592,7 @@ static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
 	phys_addr_t phys = page_to_phys(page) + offset;
 
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		arch_sync_dma_for_device(dev, phys, size, dir);
+		arch_sync_dma_for_device(phys, size, dir);
 	return vdma_alloc(phys, size);
 }
 
@@ -600,7 +600,7 @@ static void jazz_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		arch_sync_dma_for_cpu(dev, vdma_log2phys(dma_addr), size, dir);
+		arch_sync_dma_for_cpu(vdma_log2phys(dma_addr), size, dir);
 	vdma_free(dma_addr);
 }
 
@@ -612,7 +612,7 @@ static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 
 	for_each_sg(sglist, sg, nents, i) {
 		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-			arch_sync_dma_for_device(dev, sg_phys(sg), sg->length,
+			arch_sync_dma_for_device(sg_phys(sg), sg->length,
 				dir);
 		sg->dma_address = vdma_alloc(sg_phys(sg), sg->length);
 		if (sg->dma_address == DMA_MAPPING_ERROR)
@@ -631,8 +631,7 @@ static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 
 	for_each_sg(sglist, sg, nents, i) {
 		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-			arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length,
-				dir);
+			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
 		vdma_free(sg->dma_address);
 	}
 }
@@ -640,13 +639,13 @@ static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 static void jazz_dma_sync_single_for_device(struct device *dev,
 		dma_addr_t addr, size_t size, enum dma_data_direction dir)
 {
-	arch_sync_dma_for_device(dev, vdma_log2phys(addr), size, dir);
+	arch_sync_dma_for_device(vdma_log2phys(addr), size, dir);
 }
 
 static void jazz_dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t addr, size_t size, enum dma_data_direction dir)
 {
-	arch_sync_dma_for_cpu(dev, vdma_log2phys(addr), size, dir);
+	arch_sync_dma_for_cpu(vdma_log2phys(addr), size, dir);
 }
 
 static void jazz_dma_sync_sg_for_device(struct device *dev,
@@ -656,7 +655,7 @@ static void jazz_dma_sync_sg_for_device(struct device *dev,
 	int i;
 
 	for_each_sg(sgl, sg, nents, i)
-		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+		arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
 }
 
 static void jazz_dma_sync_sg_for_cpu(struct device *dev,
@@ -666,7 +665,7 @@ static void jazz_dma_sync_sg_for_cpu(struct device *dev,
 	int i;
 
 	for_each_sg(sgl, sg, nents, i)
-		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+		arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
 }
 
 const struct dma_map_ops jazz_dma_ops = {

@@ -27,7 +27,7 @@
  * R10000 and R12000 are used in such systems, the SGI IP28 Indigo² rsp.
  * SGI IP32 aka O2.
  */
-static inline bool cpu_needs_post_dma_flush(struct device *dev)
+static inline bool cpu_needs_post_dma_flush(void)
 {
 	switch (boot_cpu_type()) {
 	case CPU_R10000:
@@ -112,17 +112,17 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
 	} while (left);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	dma_sync_phys(paddr, size, dir);
 }
 
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
-	if (cpu_needs_post_dma_flush(dev))
+	if (cpu_needs_post_dma_flush())
 		dma_sync_phys(paddr, size, dir);
 }
 #endif

@@ -46,8 +46,8 @@ static inline void cache_op(phys_addr_t paddr, size_t size,
 	} while (left);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_FROM_DEVICE:
@@ -61,8 +61,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
 	}
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_TO_DEVICE:

@@ -18,8 +18,8 @@
 #include <linux/cache.h>
 #include <asm/cacheflush.h>
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	void *vaddr = phys_to_virt(paddr);
 
@@ -42,8 +42,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
 	}
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	void *vaddr = phys_to_virt(paddr);
 

@@ -125,7 +125,7 @@ arch_dma_free(struct device *dev, size_t size, void *vaddr,
 	free_pages_exact(vaddr, size);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t addr, size_t size,
+void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
 		enum dma_data_direction dir)
 {
 	unsigned long cl;

@@ -439,14 +439,14 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 	free_pages((unsigned long)__va(dma_handle), order);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
 }

@@ -104,14 +104,14 @@ static void __dma_sync_page(phys_addr_t paddr, size_t size, int dir)
 #endif
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	__dma_sync_page(paddr, size, dir);
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	__dma_sync_page(paddr, size, dir);
 }

@@ -25,7 +25,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	 * Pages from the page allocator may have data present in
 	 * cache. So flush the cache before using uncached memory.
 	 */
-	arch_sync_dma_for_device(dev, virt_to_phys(ret), size,
+	arch_sync_dma_for_device(virt_to_phys(ret), size,
 			DMA_BIDIRECTIONAL);
 
 	ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
@@ -59,8 +59,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 	iounmap(vaddr);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	void *addr = sh_cacheop_vaddr(phys_to_virt(paddr));
 

@@ -366,8 +366,8 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
 
 /* IIep is write-through, not flushing on cpu to device transfer. */
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	if (dir != PCI_DMA_TODEVICE)
 		dma_make_coherent(paddr, PAGE_ALIGN(size));

@@ -44,8 +44,8 @@ static void do_cache_op(phys_addr_t paddr, size_t size,
 		}
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_BIDIRECTIONAL:
@@ -62,8 +62,8 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
 	}
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_BIDIRECTIONAL:

@@ -659,7 +659,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
 		return;
 
 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
-	arch_sync_dma_for_cpu(dev, phys, size, dir);
+	arch_sync_dma_for_cpu(phys, size, dir);
 }
 
 static void iommu_dma_sync_single_for_device(struct device *dev,
@@ -671,7 +671,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
 		return;
 
 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
-	arch_sync_dma_for_device(dev, phys, size, dir);
+	arch_sync_dma_for_device(phys, size, dir);
 }
 
 static void iommu_dma_sync_sg_for_cpu(struct device *dev,
@@ -685,7 +685,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
 		return;
 
 	for_each_sg(sgl, sg, nelems, i)
-		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+		arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
 }
 
 static void iommu_dma_sync_sg_for_device(struct device *dev,
@@ -699,7 +699,7 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
 		return;
 
 	for_each_sg(sgl, sg, nelems, i)
-		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+		arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
 }
 
 static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
@@ -714,7 +714,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 	dma_handle =__iommu_dma_map(dev, phys, size, prot);
 	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 	    dma_handle != DMA_MAPPING_ERROR)
-		arch_sync_dma_for_device(dev, phys, size, dir);
+		arch_sync_dma_for_device(phys, size, dir);
 	return dma_handle;
 }
 

@@ -405,7 +405,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 
 done:
 	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		xen_dma_sync_for_device(dev, dev_addr, phys, size, dir);
+		xen_dma_sync_for_device(dev_addr, phys, size, dir);
 	return dev_addr;
 }
 
@@ -425,7 +425,7 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 	BUG_ON(dir == DMA_NONE);
 
 	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		xen_dma_sync_for_cpu(hwdev, dev_addr, paddr, size, dir);
+		xen_dma_sync_for_cpu(dev_addr, paddr, size, dir);
 
 	/* NOTE: We use dev_addr here, not paddr! */
 	if (is_xen_swiotlb_buffer(dev_addr))
@@ -439,7 +439,7 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
 	phys_addr_t paddr = xen_bus_to_phys(dma_addr);
 
 	if (!dev_is_dma_coherent(dev))
-		xen_dma_sync_for_cpu(dev, dma_addr, paddr, size, dir);
+		xen_dma_sync_for_cpu(dma_addr, paddr, size, dir);
 
 	if (is_xen_swiotlb_buffer(dma_addr))
 		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
@@ -455,7 +455,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
 		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
 
 	if (!dev_is_dma_coherent(dev))
-		xen_dma_sync_for_device(dev, dma_addr, paddr, size, dir);
+		xen_dma_sync_for_device(dma_addr, paddr, size, dir);
 }
 
 /*

@@ -73,29 +73,29 @@ static inline void arch_dma_cache_sync(struct device *dev, void *vaddr,
 #endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */
 
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir);
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir);
 #else
-static inline void arch_sync_dma_for_device(struct device *dev,
-		phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 }
 #endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */
 
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir);
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir);
 #else
-static inline void arch_sync_dma_for_cpu(struct device *dev,
-		phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 }
 #endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
 
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
-void arch_sync_dma_for_cpu_all(struct device *dev);
+void arch_sync_dma_for_cpu_all(void);
 #else
-static inline void arch_sync_dma_for_cpu_all(struct device *dev)
+static inline void arch_sync_dma_for_cpu_all(void)
 {
 }
 #endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */

@@ -4,10 +4,10 @@
 
 #include <linux/swiotlb.h>
 
-void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
-		phys_addr_t paddr, size_t size, enum dma_data_direction dir);
-void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
-		phys_addr_t paddr, size_t size, enum dma_data_direction dir);
+void xen_dma_sync_for_cpu(dma_addr_t handle, phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir);
+void xen_dma_sync_for_device(dma_addr_t handle, phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir);
 
 extern int xen_swiotlb_init(int verbose, bool early);
 extern const struct dma_map_ops xen_swiotlb_dma_ops;

@@ -268,7 +268,7 @@ void dma_direct_sync_single_for_device(struct device *dev,
 		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
 
 	if (!dev_is_dma_coherent(dev))
-		arch_sync_dma_for_device(dev, paddr, size, dir);
+		arch_sync_dma_for_device(paddr, size, dir);
 }
 EXPORT_SYMBOL(dma_direct_sync_single_for_device);
 
@@ -286,7 +286,7 @@ void dma_direct_sync_sg_for_device(struct device *dev,
 					dir, SYNC_FOR_DEVICE);
 
 		if (!dev_is_dma_coherent(dev))
-			arch_sync_dma_for_device(dev, paddr, sg->length,
+			arch_sync_dma_for_device(paddr, sg->length,
 					dir);
 	}
 }
@@ -302,8 +302,8 @@ void dma_direct_sync_single_for_cpu(struct device *dev,
 	phys_addr_t paddr = dma_to_phys(dev, addr);
 
 	if (!dev_is_dma_coherent(dev)) {
-		arch_sync_dma_for_cpu(dev, paddr, size, dir);
-		arch_sync_dma_for_cpu_all(dev);
+		arch_sync_dma_for_cpu(paddr, size, dir);
+		arch_sync_dma_for_cpu_all();
 	}
 
 	if (unlikely(is_swiotlb_buffer(paddr)))
@@ -321,7 +321,7 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
 		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));
 
 		if (!dev_is_dma_coherent(dev))
-			arch_sync_dma_for_cpu(dev, paddr, sg->length, dir);
+			arch_sync_dma_for_cpu(paddr, sg->length, dir);
 
 		if (unlikely(is_swiotlb_buffer(paddr)))
 			swiotlb_tbl_sync_single(dev, paddr, sg->length, dir,
@@ -329,7 +329,7 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
 	}
 
 	if (!dev_is_dma_coherent(dev))
-		arch_sync_dma_for_cpu_all(dev);
+		arch_sync_dma_for_cpu_all();
 }
 EXPORT_SYMBOL(dma_direct_sync_sg_for_cpu);
 
@@ -380,7 +380,7 @@ dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
 	}
 
 	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		arch_sync_dma_for_device(dev, phys, size, dir);
+		arch_sync_dma_for_device(phys, size, dir);
 	return dma_addr;
 }
 EXPORT_SYMBOL(dma_direct_map_page);

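Taken together, a core caller such as dma_direct_sync_single_for_device() in the generic direct-mapping code now reads roughly as below. This is reassembled from the hunk above; the function signature and the paddr line are filled in from surrounding context rather than shown in the diff, so treat them as an approximation:

void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	/* Bounce-buffer sync still needs the device; cache maintenance does not. */
	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(paddr, size, dir);
}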