forked from Minki/linux
fe7e85c6f5
The dma_get_required_mask() function is used by some drivers to query the platform about what DMA mask is needed to cover all of memory. This is a bit of a strange semantic when we have to choose between IOMMU translation or bypass, but essentially what it means is "what DMA mask will give the best performance".

Currently, our IOMMU backend always returns a 32-bit mask here; we don't do anything special to it when we have bypass available. This causes some drivers to choose a 32-bit mask, thus losing the ability to use the bypass window, thinking this is more efficient.

The problem was reported for the driver of the following device:

0004:03:00.0 0107: 1000:0087 (rev 05)
0004:03:00.0 Serial Attached SCSI controller: LSI Logic / Symbios \
             Logic SAS2308 PCI-Express Fusion-MPT SAS-2 (rev 05)

This patch adds an override of that function so that, whenever a bypass window is available, it returns a 64-bit mask, letting drivers prefer this configuration.

Reported-by: Murali N. Iyer <mniyer@us.ibm.com>
Suggested-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Gavin Shan <gwshan@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
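For context, a rough sketch of the driver-side pattern the commit message refers to. This is illustrative only: the function name example_setup_dma_mask() is made up, the error handling is simplified, and it is not the actual LSI SAS2308 driver code; it only assumes the standard <linux/pci.h> and <linux/dma-mapping.h> helpers of that era.

/* Sketch only: how a driver might pick its DMA mask from dma_get_required_mask(). */
static int example_setup_dma_mask(struct pci_dev *pdev)
{
	if (sizeof(dma_addr_t) > 4 &&
	    dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32) &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		return 0;	/* 64-bit DMA: can use a bypass window if present */

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
		return 0;	/* 32-bit DMA: goes through IOMMU translation */

	return -ENODEV;
}

With the old 32-bit required mask the first branch is never taken, so such a driver settles for 32-bit DMA and IOMMU translation; once the platform reports a 64-bit requirement when a bypass window exists, the driver keeps the faster 64-bit configuration.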
250 lines
6.1 KiB
C
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <asm/vio.h>
#include <asm/bug.h>
#include <asm/machdep.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
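
/*
 * Illustrative sketch only: platform setup code could apply such an
 * offset with something along the lines of
 *
 *	set_dma_offset(&pdev->dev, 0x80000000ull);	(hypothetical value)
 *
 * assuming a set_dma_offset() style helper, after which get_dma_offset()
 * in the functions below returns that offset for the device.
 */
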
void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag,
				struct dma_attrs *attrs)
{
	void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
	if (ret == NULL)
		return NULL;
	*dma_handle += get_dma_offset(dev);
	return ret;
#else
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = __pa(ret) + get_dma_offset(dev);

	return ret;
#endif
}

void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle,
			      struct dma_attrs *attrs)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t handle, size_t size,
			     struct dma_attrs *attrs)
{
	unsigned long pfn;

#ifdef CONFIG_NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
	pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
	return remap_pfn_range(vma, vma->vm_start,
			       pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
		sg->dma_length = sg->length;
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
	}

	return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
	/* Could be improved so platforms can set the limit in case
	 * they have limited DMA windows
	 */
	return mask >= get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
#else
	return 1;
#endif
}

static u64 dma_direct_get_required_mask(struct device *dev)
{
	u64 end, mask;

	end = memblock_end_of_DRAM() + get_dma_offset(dev);

	/*
	 * Return the smallest all-ones mask that covers "end": take the
	 * highest set bit of end and fill in every bit below it.  For
	 * example, end = 0x100000000 (4 GiB) gives fls64() = 33, so the
	 * resulting mask is 0x1ffffffff.
	 */
	mask = 1ULL << (fls64(end) - 1);
	mask += mask - 1;

	return mask;
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction dir,
					     struct dma_attrs *attrs)
{
	BUG_ON(dir == DMA_NONE);
	__dma_sync_page(page, offset, size, dir);
	return page_to_phys(page) + offset + get_dma_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
}

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
				      struct scatterlist *sgl, int nents,
				      enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single(struct device *dev,
					  dma_addr_t dma_handle, size_t size,
					  enum dma_data_direction direction)
{
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif

struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc_coherent,
	.free			= dma_direct_free_coherent,
	.mmap			= dma_direct_mmap_coherent,
	.map_sg			= dma_direct_map_sg,
	.unmap_sg		= dma_direct_unmap_sg,
	.dma_supported		= dma_direct_dma_supported,
	.map_page		= dma_direct_map_page,
	.unmap_page		= dma_direct_unmap_page,
	.get_required_mask	= dma_direct_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
	.sync_single_for_cpu	= dma_direct_sync_single,
	.sync_single_for_device	= dma_direct_sync_single,
	.sync_sg_for_cpu	= dma_direct_sync_sg,
	.sync_sg_for_device	= dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

int __dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}

int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (ppc_md.dma_set_mask)
		return ppc_md.dma_set_mask(dev, dma_mask);
	return __dma_set_mask(dev, dma_mask);
}
EXPORT_SYMBOL(dma_set_mask);

u64 __dma_get_required_mask(struct device *dev)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;

	if (dma_ops->get_required_mask)
		return dma_ops->get_required_mask(dev);

	return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
}
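
/*
 * Illustrative note: ppc_md.dma_get_required_mask below lets a platform
 * override the generic calculation; for example (a sketch of the idea
 * only, not the actual platform code) powernv can return DMA_BIT_MASK(64)
 * for a device that has a 64-bit bypass window available, instead of the
 * 32-bit mask the IOMMU backend would otherwise report.
 */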
u64 dma_get_required_mask(struct device *dev)
{
	if (ppc_md.dma_get_required_mask)
		return ppc_md.dma_get_required_mask(dev);

	return __dma_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
#ifdef CONFIG_IBMVIO
	dma_debug_add_bus(&vio_bus_type);
#endif

	return 0;
}
fs_initcall(dma_init);