arm: make SWIOTLB available
IOMMU_HELPER is needed because SWIOTLB calls iommu_is_span_boundary, provided by lib/iommu_helper.c.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
CC: will.deacon@arm.com
Acked-by: Russell King <rmk+kernel@arm.linux.org.uk>

Changes in v9:
- remove unneeded include asm/cacheflush.h;
- just return 0 if !dev->dma_mask in dma_capable.

Changes in v8:
- use __phys_to_pfn and __pfn_to_phys.

Changes in v7:
- dma_mark_clean: empty implementation;
- in dma_capable use coherent_dma_mask if dma_mask hasn't been allocated.

Changes in v6:
- check for dev->dma_mask being NULL in dma_capable.

Changes in v5:
- implement dma_mark_clean using dmac_flush_range.

Changes in v3:
- dma_capable: do not treat dma_mask as a limit;
- remove SWIOTLB dependency on NEED_SG_DMA_LENGTH.
This commit is contained in:

	parent 3b284bde70
	commit fbd989b1d7
				| @ -1872,6 +1872,12 @@ config CC_STACKPROTECTOR | ||||
| 	  neutralized via a kernel panic. | ||||
| 	  This feature requires gcc version 4.2 or above. | ||||
| 
 | ||||
| config SWIOTLB | ||||
| 	def_bool y | ||||
| 
 | ||||
| config IOMMU_HELPER | ||||
| 	def_bool SWIOTLB | ||||
| 
 | ||||
| config XEN_DOM0 | ||||
| 	def_bool y | ||||
| 	depends on XEN | ||||
|  | ||||
| @ -86,6 +86,39 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) | ||||
| } | ||||
| #endif | ||||
| 
 | ||||
| static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) | ||||
| { | ||||
| 	unsigned int offset = paddr & ~PAGE_MASK; | ||||
| 	return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset; | ||||
| } | ||||
| 
 | ||||
| static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) | ||||
| { | ||||
| 	unsigned int offset = dev_addr & ~PAGE_MASK; | ||||
| 	return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset; | ||||
| } | ||||
| 
 | ||||
| static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) | ||||
| { | ||||
| 	u64 limit, mask; | ||||
| 
 | ||||
| 	if (!dev->dma_mask) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	mask = *dev->dma_mask; | ||||
| 
 | ||||
| 	limit = (mask + 1) & ~mask; | ||||
| 	if (limit && size > limit) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	if ((addr | (addr + size - 1)) & ~mask) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	return 1; | ||||
| } | ||||
| 
 | ||||
| static inline void dma_mark_clean(void *addr, size_t size) { } | ||||
| 
 | ||||
| /*
 | ||||
|  * DMA errors are defined by all-bits-set in the DMA address. | ||||
|  */ | ||||
|  | ||||
		Loading…
	
		Reference in New Issue
	
	Block a user