8d8bb39b9e
Add per-device dma_mapping_ops support for CONFIG_X86_64, as the POWER architecture does. This enables us to cleanly fix the Calgary IOMMU issue that some devices are not behind the IOMMU (http://lkml.org/lkml/2008/5/8/423).

I think that per-device dma_mapping_ops support would also be helpful for KVM people to support PCI passthrough, but Andi thinks that this makes it difficult to support PCI passthrough (see the above thread). So I CC'ed this to the KVM camp. Comments are appreciated.

A pointer to dma_mapping_ops is added to struct dev_archdata. If the pointer is non-NULL, the DMA operations in asm/dma-mapping.h use it. If it is NULL, the system-wide dma_ops pointer is used as before.

If it is useful for KVM people, I plan to implement a mechanism to register a hook called when a new PCI (or DMA-capable) device is created (it works with hot plugging). It enables IOMMUs to set up an appropriate dma_mapping_ops per device.

The major obstacle is that dma_mapping_error doesn't take a pointer to the device, unlike the other DMA operations, so x86 can't have dma_mapping_ops per device. Note that all the POWER IOMMUs use the same dma_mapping_error function, so this is not a problem for POWER; x86 IOMMUs, however, use different dma_mapping_error functions.

The first patch adds the device argument to dma_mapping_error. The patch is trivial but large, since it touches lots of drivers and dma-mapping.h in all the architectures.

This patch:

dma_mapping_error() doesn't take a pointer to the device, unlike the other DMA operations, so we can't have dma_mapping_ops per device. Note that POWER already has dma_mapping_ops per device, but all the POWER IOMMUs use the same dma_mapping_error function; x86 IOMMUs use different ones and therefore need the device argument.

[akpm@linux-foundation.org: fix sge]
[akpm@linux-foundation.org: fix svc_rdma]
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: fix bnx2x]
[akpm@linux-foundation.org: fix s2io]
[akpm@linux-foundation.org: fix pasemi_mac]
[akpm@linux-foundation.org: fix sdhci]
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: fix sparc]
[akpm@linux-foundation.org: fix ibmvscsi]
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Muli Ben-Yehuda <muli@il.ibm.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Avi Kivity <avi@qumranet.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
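For illustration, a minimal sketch of how an IOMMU driver could use the per-device hook once this series is applied. The calgary_dma_ops table and the device_is_behind_calgary() helper are hypothetical names invented for this example; only the dev->archdata.dma_ops assignment is the mechanism the patch actually adds:

#include <linux/pci.h>

extern struct dma_mapping_ops calgary_dma_ops;	/* illustrative only */

/* Hypothetical per-device setup in an IOMMU driver: devices behind the
 * IOMMU get the driver's ops; everything else is left with a NULL
 * archdata.dma_ops and falls back to the system-wide dma_ops. */
static void calgary_setup_dev_dma_ops(struct pci_dev *pdev)
{
        if (device_is_behind_calgary(pdev))     /* invented helper */
                pdev->dev.archdata.dma_ops = &calgary_dma_ops;
}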
274 lines
7.6 KiB
C
#ifndef _ASM_DMA_MAPPING_H_
#define _ASM_DMA_MAPPING_H_

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device fallback_dev;
extern int panic_on_overflow;
extern int force_iommu;

struct dma_mapping_ops {
        int (*mapping_error)(struct device *dev,
                        dma_addr_t dma_addr);
        void *(*alloc_coherent)(struct device *dev, size_t size,
                        dma_addr_t *dma_handle, gfp_t gfp);
        void (*free_coherent)(struct device *dev, size_t size,
                        void *vaddr, dma_addr_t dma_handle);
        dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
                        size_t size, int direction);
        /* like map_single, but doesn't check the device mask */
        dma_addr_t (*map_simple)(struct device *hwdev, phys_addr_t ptr,
                        size_t size, int direction);
        void (*unmap_single)(struct device *dev, dma_addr_t addr,
                        size_t size, int direction);
        void (*sync_single_for_cpu)(struct device *hwdev,
                        dma_addr_t dma_handle, size_t size,
                        int direction);
        void (*sync_single_for_device)(struct device *hwdev,
                        dma_addr_t dma_handle, size_t size,
                        int direction);
        void (*sync_single_range_for_cpu)(struct device *hwdev,
                        dma_addr_t dma_handle, unsigned long offset,
                        size_t size, int direction);
        void (*sync_single_range_for_device)(struct device *hwdev,
                        dma_addr_t dma_handle, unsigned long offset,
                        size_t size, int direction);
        void (*sync_sg_for_cpu)(struct device *hwdev,
                        struct scatterlist *sg, int nelems,
                        int direction);
        void (*sync_sg_for_device)(struct device *hwdev,
                        struct scatterlist *sg, int nelems,
                        int direction);
        int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
                        int nents, int direction);
        void (*unmap_sg)(struct device *hwdev,
                        struct scatterlist *sg, int nents,
                        int direction);
        int (*dma_supported)(struct device *hwdev, u64 mask);
        int is_phys;
};

extern struct dma_mapping_ops *dma_ops;

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
        return dma_ops;
#else
        if (unlikely(!dev) || !dev->archdata.dma_ops)
                return dma_ops;
        else
                return dev->archdata.dma_ops;
#endif
}

/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_X86_32
        return 0;
#else
        struct dma_mapping_ops *ops = get_dma_ops(dev);
        if (ops->mapping_error)
                return ops->mapping_error(dev, dma_addr);

        return (dma_addr == bad_dma_address);
#endif
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
                void *vaddr, dma_addr_t dma_handle);

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
               int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->unmap_single)
                ops->unmap_single(dev, addr, size, direction);
}

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
           int nents, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->unmap_sg)
                ops->unmap_sg(hwdev, sg, nents, direction);
}

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(hwdev, dma_handle, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_range_for_cpu)
                ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
                                               size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_range_for_device)
                ops->sync_single_range_for_device(hwdev, dma_handle,
                                                  offset, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(hwdev, sg, nelems, direction);

        flush_write_buffers();
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_single(dev, page_to_phys(page) + offset,
                               size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, int direction)
{
        dma_unmap_single(dev, addr, size, direction);
}

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction dir)
{
        flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
        /* no easy way to get cache size on all x86, so return the
         * maximum possible, to be safe */
        return boot_cpu_data.x86_clflush_size;
}

#define dma_is_consistent(d, h) (1)

#ifdef CONFIG_X86_32
# define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
struct dma_coherent_mem {
        void            *virt_base;
        u32             device_base;
        int             size;
        int             flags;
        unsigned long   *bitmap;
};

extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                            dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
                                  dma_addr_t device_addr, size_t size);
#endif /* CONFIG_X86_32 */
#endif
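As a usage note, here is a driver-side sketch of the interface above, assuming a driver that already holds a struct device pointer. The function name and error handling are illustrative, not part of this commit; the point is that dma_mapping_error() now receives the device, so the per-device ops' mapping_error implementation can be dispatched to:

#include <linux/dma-mapping.h>

/* Map a buffer so the device can read it, then check the mapping.
 * example_map_buffer() is an invented name for this sketch. */
static int example_map_buffer(struct device *dev, void *buf, size_t len,
                              dma_addr_t *handle)
{
        *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *handle))    /* device argument is new */
                return -ENOMEM;
        return 0;
}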