powerpc/dma: remove dma_nommu_mmap_coherent
The coherent cache version of this function already is functionally identical to the default version, and by defining the arch_dma_coherent_to_pfn hook the same is true for the noncoherent version as well. Signed-off-by: Christoph Hellwig <hch@lst.de> Tested-by: Christian Zigotzky <chzigotzky@xenosoft.de> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
This commit is contained in:
parent
18b53a2d47
commit
6666cc17d7
@ -25,10 +25,6 @@ extern void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
|
|||||||
extern void __dma_nommu_free_coherent(struct device *dev, size_t size,
|
extern void __dma_nommu_free_coherent(struct device *dev, size_t size,
|
||||||
void *vaddr, dma_addr_t dma_handle,
|
void *vaddr, dma_addr_t dma_handle,
|
||||||
unsigned long attrs);
|
unsigned long attrs);
|
||||||
extern int dma_nommu_mmap_coherent(struct device *dev,
|
|
||||||
struct vm_area_struct *vma,
|
|
||||||
void *cpu_addr, dma_addr_t handle,
|
|
||||||
size_t size, unsigned long attrs);
|
|
||||||
int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
|
int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
|
||||||
int nents, enum dma_data_direction direction,
|
int nents, enum dma_data_direction direction,
|
||||||
unsigned long attrs);
|
unsigned long attrs);
|
||||||
|
@ -167,7 +167,6 @@ u64 dma_iommu_get_required_mask(struct device *dev)
|
|||||||
const struct dma_map_ops dma_iommu_ops = {
|
const struct dma_map_ops dma_iommu_ops = {
|
||||||
.alloc = dma_iommu_alloc_coherent,
|
.alloc = dma_iommu_alloc_coherent,
|
||||||
.free = dma_iommu_free_coherent,
|
.free = dma_iommu_free_coherent,
|
||||||
.mmap = dma_nommu_mmap_coherent,
|
|
||||||
.map_sg = dma_iommu_map_sg,
|
.map_sg = dma_iommu_map_sg,
|
||||||
.unmap_sg = dma_iommu_unmap_sg,
|
.unmap_sg = dma_iommu_unmap_sg,
|
||||||
.dma_supported = dma_iommu_dma_supported,
|
.dma_supported = dma_iommu_dma_supported,
|
||||||
|
@ -34,7 +34,6 @@ unsigned int ppc_swiotlb_enable;
|
|||||||
const struct dma_map_ops powerpc_swiotlb_dma_ops = {
|
const struct dma_map_ops powerpc_swiotlb_dma_ops = {
|
||||||
.alloc = __dma_nommu_alloc_coherent,
|
.alloc = __dma_nommu_alloc_coherent,
|
||||||
.free = __dma_nommu_free_coherent,
|
.free = __dma_nommu_free_coherent,
|
||||||
.mmap = dma_nommu_mmap_coherent,
|
|
||||||
.map_sg = dma_direct_map_sg,
|
.map_sg = dma_direct_map_sg,
|
||||||
.unmap_sg = dma_direct_unmap_sg,
|
.unmap_sg = dma_direct_unmap_sg,
|
||||||
.dma_supported = swiotlb_dma_supported,
|
.dma_supported = swiotlb_dma_supported,
|
||||||
|
@ -114,24 +114,6 @@ void __dma_nommu_free_coherent(struct device *dev, size_t size,
|
|||||||
}
|
}
|
||||||
#endif /* !CONFIG_NOT_COHERENT_CACHE */
|
#endif /* !CONFIG_NOT_COHERENT_CACHE */
|
||||||
|
|
||||||
int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
|
|
||||||
void *cpu_addr, dma_addr_t handle, size_t size,
|
|
||||||
unsigned long attrs)
|
|
||||||
{
|
|
||||||
unsigned long pfn;
|
|
||||||
|
|
||||||
#ifdef CONFIG_NOT_COHERENT_CACHE
|
|
||||||
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
|
|
||||||
pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
|
|
||||||
#else
|
|
||||||
pfn = page_to_pfn(virt_to_page(cpu_addr));
|
|
||||||
#endif
|
|
||||||
return remap_pfn_range(vma, vma->vm_start,
|
|
||||||
pfn + vma->vm_pgoff,
|
|
||||||
vma->vm_end - vma->vm_start,
|
|
||||||
vma->vm_page_prot);
|
|
||||||
}
|
|
||||||
|
|
||||||
int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
|
int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
|
||||||
int nents, enum dma_data_direction direction,
|
int nents, enum dma_data_direction direction,
|
||||||
unsigned long attrs)
|
unsigned long attrs)
|
||||||
@ -218,7 +200,6 @@ static inline void dma_nommu_sync_single(struct device *dev,
|
|||||||
const struct dma_map_ops dma_nommu_ops = {
|
const struct dma_map_ops dma_nommu_ops = {
|
||||||
.alloc = __dma_nommu_alloc_coherent,
|
.alloc = __dma_nommu_alloc_coherent,
|
||||||
.free = __dma_nommu_free_coherent,
|
.free = __dma_nommu_free_coherent,
|
||||||
.mmap = dma_nommu_mmap_coherent,
|
|
||||||
.map_sg = dma_nommu_map_sg,
|
.map_sg = dma_nommu_map_sg,
|
||||||
.unmap_sg = dma_nommu_unmap_sg,
|
.unmap_sg = dma_nommu_unmap_sg,
|
||||||
.dma_supported = dma_nommu_dma_supported,
|
.dma_supported = dma_nommu_dma_supported,
|
||||||
|
@ -30,6 +30,7 @@
|
|||||||
#include <linux/types.h>
|
#include <linux/types.h>
|
||||||
#include <linux/highmem.h>
|
#include <linux/highmem.h>
|
||||||
#include <linux/dma-direct.h>
|
#include <linux/dma-direct.h>
|
||||||
|
#include <linux/dma-noncoherent.h>
|
||||||
#include <linux/export.h>
|
#include <linux/export.h>
|
||||||
|
|
||||||
#include <asm/tlbflush.h>
|
#include <asm/tlbflush.h>
|
||||||
@ -400,14 +401,16 @@ EXPORT_SYMBOL(__dma_sync_page);
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
* Return the PFN for a given cpu virtual address returned by
|
* Return the PFN for a given cpu virtual address returned by
|
||||||
* __dma_nommu_alloc_coherent. This is used by dma_mmap_coherent()
|
* __dma_nommu_alloc_coherent.
|
||||||
*/
|
*/
|
||||||
unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr)
|
long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr,
|
||||||
|
dma_addr_t dma_addr)
|
||||||
{
|
{
|
||||||
/* This should always be populated, so we don't test every
|
/* This should always be populated, so we don't test every
|
||||||
* level. If that fails, we'll have a nice crash which
|
* level. If that fails, we'll have a nice crash which
|
||||||
* will be as good as a BUG_ON()
|
* will be as good as a BUG_ON()
|
||||||
*/
|
*/
|
||||||
|
unsigned long cpu_addr = (unsigned long)vaddr;
|
||||||
pgd_t *pgd = pgd_offset_k(cpu_addr);
|
pgd_t *pgd = pgd_offset_k(cpu_addr);
|
||||||
pud_t *pud = pud_offset(pgd, cpu_addr);
|
pud_t *pud = pud_offset(pgd, cpu_addr);
|
||||||
pmd_t *pmd = pmd_offset(pud, cpu_addr);
|
pmd_t *pmd = pmd_offset(pud, cpu_addr);
|
||||||
|
@ -402,6 +402,7 @@ config NOT_COHERENT_CACHE
|
|||||||
bool
|
bool
|
||||||
depends on 4xx || PPC_8xx || E200 || PPC_MPC512x || \
|
depends on 4xx || PPC_8xx || E200 || PPC_MPC512x || \
|
||||||
GAMECUBE_COMMON || AMIGAONE
|
GAMECUBE_COMMON || AMIGAONE
|
||||||
|
select ARCH_HAS_DMA_COHERENT_TO_PFN
|
||||||
default n if PPC_47x
|
default n if PPC_47x
|
||||||
default y
|
default y
|
||||||
|
|
||||||
|
@ -603,7 +603,6 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
|
|||||||
static const struct dma_map_ops vio_dma_mapping_ops = {
|
static const struct dma_map_ops vio_dma_mapping_ops = {
|
||||||
.alloc = vio_dma_iommu_alloc_coherent,
|
.alloc = vio_dma_iommu_alloc_coherent,
|
||||||
.free = vio_dma_iommu_free_coherent,
|
.free = vio_dma_iommu_free_coherent,
|
||||||
.mmap = dma_nommu_mmap_coherent,
|
|
||||||
.map_sg = vio_dma_iommu_map_sg,
|
.map_sg = vio_dma_iommu_map_sg,
|
||||||
.unmap_sg = vio_dma_iommu_unmap_sg,
|
.unmap_sg = vio_dma_iommu_unmap_sg,
|
||||||
.map_page = vio_dma_iommu_map_page,
|
.map_page = vio_dma_iommu_map_page,
|
||||||
|
Loading…
Reference in New Issue
Block a user