forked from Minki/linux
17a941d854
AK: I hacked Muli's original patch a lot and there were a lot of changes - all bugs are probably to blame on me now. There were also some changes in the fall back behaviour for swiotlb - in particular it doesn't try to use GFP_DMA now anymore. Also all DMA mapping operations use the same core dma_alloc_coherent code with proper fallbacks now. And various other changes and cleanups. Known problems: iommu=force together with swiotlb=force breaks; this needs more testing. This patch cleans up x86_64's DMA mapping dispatching code. Right now we have three possible IOMMU types: AGP GART, swiotlb and nommu, and in the future we will also have Xen's x86_64 swiotlb and other HW IOMMUs for x86_64. In order to support all of them cleanly, this patch: - introduces a struct dma_mapping_ops with function pointers for each of the DMA mapping operations of gart (AMD HW IOMMU), swiotlb (software IOMMU) and nommu (no IOMMU). - gets rid of: if (swiotlb) return swiotlb_xxx(); - PCI_DMA_BUS_IS_PHYS is now checked against the dma_ops being set This makes swiotlb faster by avoiding double copying in some cases. Signed-Off-By: Muli Ben-Yehuda <mulix@mulix.org> Signed-Off-By: Jon D. Mason <jdmason@us.ibm.com> Signed-off-by: Andi Kleen <ak@suse.de> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
43 lines
1.3 KiB
C
43 lines
1.3 KiB
C
/* Glue code to lib/swiotlb.c */
|
|
|
|
#include <linux/pci.h>
|
|
#include <linux/cache.h>
|
|
#include <linux/module.h>
|
|
#include <asm/dma-mapping.h>
|
|
#include <asm/proto.h>
|
|
#include <asm/swiotlb.h>
|
|
#include <asm/dma.h>
|
|
|
|
/*
 * Non-zero when the software IOMMU (SWIOTLB bounce buffering) is in
 * use.  Set by pci_swiotlb_init() below; marked __read_mostly since it
 * is written once at boot and then only read.  Exported so modules can
 * test whether bounce buffering is active.
 */
int swiotlb __read_mostly;
EXPORT_SYMBOL(swiotlb);
|
|
/*
 * DMA mapping operations table that dispatches every DMA API call to
 * the corresponding lib/swiotlb.c implementation.  Installed as the
 * global dma_ops pointer by pci_swiotlb_init() when SWIOTLB is chosen.
 */
struct dma_mapping_ops swiotlb_dma_ops = {
	.mapping_error = swiotlb_dma_mapping_error,
	.alloc_coherent = swiotlb_alloc_coherent,
	.free_coherent = swiotlb_free_coherent,
	.map_single = swiotlb_map_single,
	.unmap_single = swiotlb_unmap_single,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.map_sg = swiotlb_map_sg,
	.unmap_sg = swiotlb_unmap_sg,
	/* NULL: no SWIOTLB-specific mask check; presumably the caller
	 * falls back to a generic dma_supported path — TODO confirm. */
	.dma_supported = NULL,
};
|
|
|
void pci_swiotlb_init(void)
|
|
{
|
|
/* don't initialize swiotlb if iommu=off (no_iommu=1) */
|
|
if (!iommu_aperture && !no_iommu &&
|
|
(end_pfn > MAX_DMA32_PFN || force_iommu))
|
|
swiotlb = 1;
|
|
if (swiotlb) {
|
|
swiotlb_init();
|
|
printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
|
|
dma_ops = &swiotlb_dma_ops;
|
|
}
|
|
}
|