Commit 81d11955bf ("ARM: 6405/1: Handle __flush_icache_all for CONFIG_SMP_ON_UP") added a new function to struct cpu_cache_fns: flush_icache_all(). It also implemented this for v6 and v7, but not for v5 and earlier. Without the function pointer in place, we end up calling the wrong cache functions.

For example, with ep93xx we get the following:
Unable to handle kernel paging request at virtual address ee070f38
pgd = c0004000
[ee070f38] *pgd=00000000
Internal error: Oops: 80000005 [#1] PREEMPT
last sysfs file:
Modules linked in:
CPU: 0 Not tainted (2.6.36+ #1)
PC is at 0xee070f38
LR is at __dma_alloc+0x11c/0x2d0
pc : [<ee070f38>] lr : [<c0032c8c>] psr: 60000013
sp : c581bde0 ip : 00000000 fp : c0472000
r10: c0472000 r9 : 000000d0 r8 : 00020000
r7 : 0001ffff r6 : 00000000 r5 : c0472400 r4 : c5980000
r3 : c03ab7e0 r2 : 00000000 r1 : c59a0000 r0 : c5980000
Flags: nZCv IRQs on FIQs on Mode SVC_32 ISA ARM Segment kernel
Control: c000717f Table: c0004000 DAC: 00000017
Process swapper (pid: 1, stack limit = 0xc581a270)
[<c0032c8c>] (__dma_alloc+0x11c/0x2d0)
[<c0032e5c>] (dma_alloc_writecombine+0x1c/0x24)
[<c0204148>] (ep93xx_pcm_preallocate_dma_buffer+0x44/0x60)
[<c02041c0>] (ep93xx_pcm_new+0x5c/0x88)
[<c01ff188>] (snd_soc_instantiate_cards+0x8a8/0xbc0)
[<c01ff59c>] (soc_probe+0xfc/0x134)
[<c01adafc>] (platform_drv_probe+0x18/0x1c)
[<c01acca4>] (driver_probe_device+0xb0/0x16c)
[<c01ac284>] (bus_for_each_drv+0x48/0x84)
[<c01ace90>] (device_attach+0x50/0x68)
[<c01ac0f8>] (bus_probe_device+0x24/0x44)
[<c01aad7c>] (device_add+0x2fc/0x44c)
[<c01adfa8>] (platform_device_add+0x104/0x15c)
[<c0015eb8>] (simone_init+0x60/0x94)
[<c0021410>] (do_one_initcall+0xd0/0x1a4)
__dma_alloc() calls the (inlined) __dma_alloc_buffer(), which ends up calling dmac_flush_range(). Since the entries in arm920_cache_fns are now shifted by one, we jump to address 0xee070f38, which is actually the next instruction after the arm920_cache_fns structure.
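The mismatch is easiest to see in the function-pointer tables emitted at the end of each cache-*.S file. Below is a rough sketch of the pre-fix situation, using the v4_cache_fns layout from cache-v4.S (shown further down) as a stand-in for arm920_cache_fns; the comments name the struct cpu_cache_fns slot the C side expects to find at each offset:

	.type	v4_cache_fns, #object
ENTRY(v4_cache_fns)
					@ no entry for the new flush_icache_all slot, so:
	.long	v4_flush_kern_cache_all		@ read by the C side as flush_icache_all
	.long	v4_flush_user_cache_all		@ read as flush_kern_all
	.long	v4_flush_user_cache_range	@ read as flush_user_all
	@ ... every following slot is off by one ...
	.long	v4_dma_flush_range		@ read as dma_unmap_area
						@ the dma_flush_range slot now lies one word
						@ past the end of the table, so dmac_flush_range()
						@ branches to whatever word happens to follow
						@ (0xee070f38 in the arm920 oops above)
	.size	v4_cache_fns, . - v4_cache_fns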
So implement flush_icache_all() for the rest of the supported CPUs
using a generic 'invalidate I cache' instruction.
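For CPUs whose CP15 exposes an "invalidate entire I cache" operation, the missing entry is a short stub along the lines of the sketch below (the v4wb_ symbol name is chosen by analogy with the v4_ naming used in cache-v4.S and is only illustrative):

/*
 *	flush_icache_all()
 *
 *	Unconditionally invalidate the entire icache.
 */
ENTRY(v4wb_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mov	pc, lr
ENDPROC(v4wb_flush_icache_all)

The corresponding .long entry is then added as the first slot of that CPU's cache_fns table, matching the new first member of struct cpu_cache_fns.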
Signed-off-by: Mika Westerberg <mika.westerberg@iki.fi>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
/*
 *  linux/arch/arm/mm/cache-v4.S
 *
 *  Copyright (C) 1997-2002 Russell king
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4_flush_icache_all)
	mov	pc, lr
ENDPROC(v4_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 *
 *	- mm	- mm_struct describing address space
 */
ENTRY(v4_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4_flush_kern_cache_all)
#ifdef CONFIG_CPU_CP15
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
	mov	pc, lr
#else
	/* FALLTHROUGH */
#endif

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start	- start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vma_area_struct flags describing address space
 */
ENTRY(v4_flush_user_cache_range)
#ifdef CONFIG_CPU_CP15
	mov	ip, #0
	mcreq	p15, 0, ip, c7, c7, 0		@ flush ID cache
	mov	pc, lr
#else
	/* FALLTHROUGH */
#endif

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(v4_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(v4_coherent_user_range)
	mov	pc, lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v4_flush_kern_dcache_area)
	/* FALLTHROUGH */

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(v4_dma_flush_range)
#ifdef CONFIG_CPU_CP15
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
#endif
	mov	pc, lr

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4_dma_unmap_area)
	teq	r2, #DMA_TO_DEVICE
	bne	v4_dma_flush_range
	/* FALLTHROUGH */

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4_dma_map_area)
	mov	pc, lr
ENDPROC(v4_dma_unmap_area)
ENDPROC(v4_dma_map_area)

	__INITDATA

	.type	v4_cache_fns, #object
ENTRY(v4_cache_fns)
	.long	v4_flush_icache_all
	.long	v4_flush_kern_cache_all
	.long	v4_flush_user_cache_all
	.long	v4_flush_user_cache_range
	.long	v4_coherent_kern_range
	.long	v4_coherent_user_range
	.long	v4_flush_kern_dcache_area
	.long	v4_dma_map_area
	.long	v4_dma_unmap_area
	.long	v4_dma_flush_range
	.size	v4_cache_fns, . - v4_cache_fns