microblaze: Preliminary support for dma drivers

I found several problems with the ll_temac driver and on systems with
write-back (WB) caches. This early fix should address them. I will clean
up this patch before adding it to mainline.

Signed-off-by: Michal Simek <monstr@monstr.eu>
Author: Michal Simek <monstr@monstr.eu>
Date:   2010-02-08 12:13:10 +01:00
Commit: d79f3b06a9 (parent 407c1da07d)

3 changed files with 17 additions and 16 deletions

--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -140,9 +140,6 @@ static inline void writel(unsigned int v, volatile void __iomem *addr)
 #define virt_to_phys(addr)	((unsigned long)__virt_to_phys(addr))
 #define virt_to_bus(addr)	((unsigned long)__virt_to_phys(addr))
 
-#define __page_address(page) \
-		(PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
-#define page_to_phys(page)	virt_to_phys((void *)__page_address(page))
 #define page_to_bus(page)	(page_to_phys(page))
 #define bus_to_virt(addr)	(phys_to_virt(addr))
 

--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -61,12 +61,6 @@ extern unsigned int __page_offset;
  */
 #define PAGE_OFFSET	CONFIG_KERNEL_START
 
-/*
- * MAP_NR -- given an address, calculate the index of the page struct which
- * points to the address's page.
- */
-#define MAP_NR(addr)	(((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT)
-
 /*
  * The basic type of a PTE - 32 bit physical addressing.
  */
@@ -154,7 +148,11 @@ extern int page_is_ram(unsigned long pfn);
 #  define pfn_to_virt(pfn)	__va(pfn_to_phys((pfn)))
 
 #  ifdef CONFIG_MMU
-#  define virt_to_page(kaddr)	(mem_map + MAP_NR(kaddr))
+
+#  define virt_to_page(kaddr)	(pfn_to_page(__pa(kaddr) >> PAGE_SHIFT))
+#  define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)
+#  define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
+
 #  else /* CONFIG_MMU */
 #  define virt_to_page(vaddr)	(pfn_to_page(virt_to_pfn(vaddr)))
 #  define page_to_virt(page)	(pfn_to_virt(page_to_pfn(page)))
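
For orientation: with the hunk above, page lookups under CONFIG_MMU go
through the pfn rather than mem_map + MAP_NR arithmetic. A minimal sketch of
how the new macros compose for a kernel buffer (illustrative only, not part
of the patch; the demo function and kbuf are made up):

/* Illustrative sketch, not patch text. */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>

static void page_macro_demo(void)
{
	void *kbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);	/* hypothetical buffer */
	struct page *pg;
	unsigned long phys;

	if (!kbuf)
		return;
	pg = virt_to_page(kbuf);	/* pfn_to_page(__pa(kbuf) >> PAGE_SHIFT) */
	phys = page_to_phys(pg);	/* page_to_pfn(pg) << PAGE_SHIFT */
	pr_info("buf %p -> phys %#lx, page-start virt %p\n",
		kbuf, phys, page_to_virt(pg));	/* __va() of the page's pfn */
	kfree(kbuf);
}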

--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -21,10 +21,10 @@
  * default the offset is PCI_DRAM_OFFSET.
  */
 
-static inline void __dma_sync_page(void *vaddr, unsigned long offset,
+static inline void __dma_sync_page(void *paddr, unsigned long offset,
 	size_t size, enum dma_data_direction direction)
 {
-	unsigned long start = virt_to_phys(vaddr);
+	unsigned long start = (unsigned long)paddr;
 
 	switch (direction) {
 	case DMA_TO_DEVICE:
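
The hunk is cut off right at the direction switch, which is where the
write-back (WB) cache handling happens: the function now receives a physical
address and flushes or invalidates that range directly. A hedged sketch of
what the rest of the function amounts to (flush_dcache_range and
invalidate_dcache_range are assumed to be the microblaze cache helpers; this
is an approximation, not the patch text):

/* Hedged reconstruction, for orientation only. */
#include <linux/bug.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>

static inline void __dma_sync_page_sketch(void *paddr, unsigned long offset,
	size_t size, enum dma_data_direction direction)
{
	unsigned long start = (unsigned long)paddr;

	switch (direction) {
	case DMA_TO_DEVICE:
		/* CPU filled the buffer: write dirty WB-cache lines back to
		 * memory before the device reads it */
		flush_dcache_range(start + offset, start + offset + size);
		break;
	case DMA_FROM_DEVICE:
		/* device will write the buffer: drop cached copies so the
		 * CPU re-reads real memory afterwards */
		invalidate_dcache_range(start + offset, start + offset + size);
		break;
	default:
		BUG();
	}
}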
@@ -79,10 +79,12 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 	struct scatterlist *sg;
 	int i;
 
+	/* FIXME this part of code is untested */
 	for_each_sg(sgl, sg, nents, i) {
 		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
 		sg->dma_length = sg->length;
-		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+		__dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
+							sg->length, direction);
 	}
 
 	return nents;
@@ -107,7 +109,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
 					     struct dma_attrs *attrs)
 {
 	BUG_ON(direction == DMA_NONE);
-	__dma_sync_page(page, offset, size, direction);
+	__dma_sync_page(page_to_phys(page), offset, size, direction);
 	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
 }
 
@@ -117,8 +119,12 @@ static inline void dma_direct_unmap_page(struct device *dev,
 				enum dma_data_direction direction,
 				struct dma_attrs *attrs)
 {
-/* There is not necessary to do cache cleanup */
-/*	__dma_sync_page(dma_address, 0 , size, direction); */
+/* There is not necessary to do cache cleanup
+ *
+ * phys_to_virt is here because in __dma_sync_page is __virt_to_phys and
+ * dma_address is physical address
+ */
+	__dma_sync_page((void *)dma_address, 0 , size, direction);
 }
 
 struct dma_map_ops dma_direct_ops = {
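
To see why the added syncs matter for a driver such as ll_temac on a WB-cache
system, here is a hedged driver-side sketch of an RX buffer cycle that ends
up in dma_direct_map_page()/dma_direct_unmap_page() above (rx_cycle,
RX_BUF_SIZE and the buffer handling are placeholders, not taken from the
patch):

/* Hedged driver-side sketch, not patch text. */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/slab.h>

#define RX_BUF_SIZE 2048	/* placeholder buffer size */

static int rx_cycle(struct device *dev)
{
	void *rx_buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);
	dma_addr_t handle;

	if (!rx_buf)
		return -ENOMEM;

	/* map: __dma_sync_page() invalidates the WB cache for this range */
	handle = dma_map_single(dev, rx_buf, RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle)) {
		kfree(rx_buf);
		return -EIO;
	}

	/* ... hand 'handle' to the device and wait for the DMA to finish ... */

	/* unmap: with the last hunk above this syncs again, so the CPU does
	 * not read stale cache lines instead of the freshly DMA'd data */
	dma_unmap_single(dev, handle, RX_BUF_SIZE, DMA_FROM_DEVICE);

	/* now it is safe to parse rx_buf on the CPU */
	kfree(rx_buf);
	return 0;
}

Before this patch the unmap path did nothing (the sync call was commented
out), which is presumably the stale-data problem on WB systems that the
commit message refers to.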