mirror of
https://github.com/torvalds/linux.git
synced 2024-11-01 09:41:44 +00:00
6ebbf2ce43
ARMv6 and greater introduced a new instruction ("bx") which can be used to return from function calls. Recent CPUs perform better when the "bx lr" instruction is used rather than the "mov pc, lr" instruction, and this sequence is strongly recommended to be used by the ARM architecture manual (section A.4.1.1). We provide a new macro "ret" with all its variants for the condition code which will resolve to the appropriate instruction. Rather than doing this piecemeal, and miss some instances, change all the "mov pc" instances to use the new macro, with the exception of the "movs" instruction and the kprobes code. This allows us to detect the "mov pc, lr" case and fix it up - and also gives us the possibility of deploying this for other registers depending on the CPU selection. Reported-by: Will Deacon <will.deacon@arm.com> Tested-by: Stephen Warren <swarren@nvidia.com> # Tegra Jetson TK1 Tested-by: Robert Jarzmik <robert.jarzmik@free.fr> # mioa701_bootresume.S Tested-by: Andrew Lunn <andrew@lunn.ch> # Kirkwood Tested-by: Shawn Guo <shawn.guo@freescale.com> Tested-by: Tony Lindgren <tony@atomide.com> # OMAPs Tested-by: Gregory CLEMENT <gregory.clement@free-electrons.com> # Armada XP, 375, 385 Acked-by: Sekhar Nori <nsekhar@ti.com> # DaVinci Acked-by: Christoffer Dall <christoffer.dall@linaro.org> # kvm/hyp Acked-by: Haojian Zhuang <haojian.zhuang@gmail.com> # PXA3xx Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> # Xen Tested-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de> # ARMv7M Tested-by: Simon Horman <horms+renesas@verge.net.au> # Shmobile Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
336 lines
7.8 KiB
ArmAsm
/*
|
|
* linux/arch/arm/mm/cache-v6.S
|
|
*
|
|
* Copyright (C) 2001 Deep Blue Solutions Ltd.
|
|
*
|
|
* This program is free software; you can redistribute it and/or modify
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
* published by the Free Software Foundation.
|
|
*
|
|
* This is the "shell" of the ARMv6 processor support.
|
|
*/
|
|
#include <linux/linkage.h>
|
|
#include <linux/init.h>
|
|
#include <asm/assembler.h>
|
|
#include <asm/errno.h>
|
|
#include <asm/unwind.h>
|
|
|
|
#include "proc-macros.S"
|
|
|
|
#define HARVARD_CACHE
|
|
#define CACHE_LINE_SIZE 32
|
|
#define D_CACHE_LINE_SIZE 32
|
|
#define BTB_FLUSH_SIZE 8
|
|
|
|
/*
 *	v6_flush_icache_all()
 *
 *	Flush (invalidate) the whole I-cache.
 *
 *	ARM1136 erratum 411920 - Invalidate Instruction Cache operation can fail.
 *	This erratum is present in 1136, 1156 and 1176. It does not affect the
 *	MPCore.
 *
 *	Registers:
 *	r0 - set to 0
 *	r1 - corrupted
 */
ENTRY(v6_flush_icache_all)
	mov	r0, #0
#ifdef CONFIG_ARM_ERRATA_411920
	/*
	 * Erratum workaround: the invalidate must be issued four times in a
	 * row with interrupts masked so no exception can be taken between
	 * the operations, followed by 11 NOPs.
	 */
	mrs	r1, cpsr
	cpsid	ifa				@ disable interrupts (IRQ, FIQ, imprecise aborts)
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	msr	cpsr_cx, r1			@ restore interrupts
	.rept	11				@ ARM Ltd recommends at least
	nop					@ 11 NOPs
	.endr
#else
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I-cache
#endif
	ret	lr
ENDPROC(v6_flush_icache_all)
|
|
|
|
/*
 *	v6_flush_kern_cache_all()
 *
 *	Flush the entire cache: D-cache clean+invalidate and I-cache/BTB
 *	invalidate on a Harvard cache, a single unified clean+invalidate
 *	otherwise.
 *
 *	Registers:
 *	r0 - set to 0
 */
ENTRY(v6_flush_kern_cache_all)
	mov	r0, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 0		@ D cache clean+invalidate
#ifndef CONFIG_ARM_ERRATA_411920
	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
#else
	b	v6_flush_icache_all		@ tail-call erratum-safe I-cache flush (returns to caller)
#endif
#else
	mcr	p15, 0, r0, c7, c15, 0		@ Cache clean+invalidate
#endif
	ret	lr
|
|
|
|
/*
 *	v6_flush_user_cache_all()
 *
 *	Flush user-space cache entries for a particular address space.
 *
 *	- mm - mm_struct describing address space
 *
 *	No maintenance is actually required here (see
 *	v6_flush_user_cache_range below), so this simply falls through.
 */
ENTRY(v6_flush_user_cache_all)
	/*FALLTHROUGH*/

/*
 *	v6_flush_user_cache_range(start, end, flags)
 *
 *	Flush a range of cache entries in the specified address space.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *	- flags - vm_area_struct flags describing address space
 *
 *	It is assumed that:
 *	- we have a VIPT cache
 *
 *	With a VIPT cache no flush is needed on these events, hence the
 *	immediate return.
 */
ENTRY(v6_flush_user_cache_range)
	ret	lr
|
|
|
|
/*
 *	v6_coherent_kern_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start - virtual start address of region
 *	- end   - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	v6_coherent_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start - virtual start address of region
 *	- end   - virtual end address of region
 *
 *	Returns 0 on success, -EFAULT if a user address in the range
 *	faults (see the 9001 fixup below).
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_user_range)
 UNWIND(.fnstart		)
#ifdef HARVARD_CACHE
	bic	r0, r0, #CACHE_LINE_SIZE - 1	@ align start down to a cache line
1:
 USER(	mcr	p15, 0, r0, c7, c10, 1	)	@ clean D line (may fault -> 9001)
	add	r0, r0, #CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
#endif
	mov	r0, #0				@ success return value
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#ifndef CONFIG_ARM_ERRATA_411920
	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
#else
	b	v6_flush_icache_all		@ erratum-safe I-cache invalidate (returns to caller)
#endif
#else
	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
#endif
	ret	lr

/*
 * Fault handling for the cache operation above.  If the virtual address in r0
 * isn't mapped, fail with -EFAULT.  USER() above registers this label (9001)
 * as the exception fixup for the faulting instruction.
 */
9001:
	mov	r0, #-EFAULT
	ret	lr
 UNWIND(.fnend		)
ENDPROC(v6_coherent_user_range)
ENDPROC(v6_coherent_kern_range)
|
|
|
|
/*
 *	v6_flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure that the data held in the page kaddr is written back
 *	to the page in question.
 *
 *	- addr - kernel address
 *	- size - region size
 */
ENTRY(v6_flush_kern_dcache_area)
	add	r1, r0, r1			@ r1 = end address
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1	@ align start down to a D-cache line
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
#else
	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
#ifdef HARVARD_CACHE
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#endif
	ret	lr
|
|
|
|
|
|
/*
 *	v6_dma_inv_range(start,end)
 *
 *	Invalidate the data cache within the specified region; we will
 *	be performing a DMA operation in this region and we want to
 *	purge old data in the cache.
 *
 *	Lines partially covered at either end of the range are cleaned
 *	(start) or cleaned & invalidated (end) first, so that data outside
 *	the range sharing those lines is not lost.
 *
 *	With CONFIG_DMA_CACHE_RWFO, each line is read and written first so
 *	this CPU gains ownership of it before the maintenance operation
 *	(SMP read/write-for-ownership workaround).
 *
 *	- start - virtual start address of region
 *	- end   - virtual end address of region
 */
v6_dma_inv_range:
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrb	r2, [r0]			@ read for ownership
	strb	r2, [r0]			@ write for ownership
#endif
	tst	r0, #D_CACHE_LINE_SIZE - 1	@ start mis-aligned within a line?
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D line
#else
	mcrne	p15, 0, r0, c7, c11, 1		@ clean unified line
#endif
	tst	r1, #D_CACHE_LINE_SIZE - 1	@ end mis-aligned within a line?
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrneb	r2, [r1, #-1]			@ read for ownership
	strneb	r2, [r1, #-1]			@ write for ownership
#endif
	bic	r1, r1, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D line
#else
	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line
#endif
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D line
#else
	mcr	p15, 0, r0, c7, c7, 1		@ invalidate unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrlo	r2, [r0]			@ read for ownership
	strlo	r2, [r0]			@ write for ownership
#endif
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr
|
|
|
|
/*
 *	v6_dma_clean_range(start,end)
 *
 *	Clean (write back) the data cache within the specified region.
 *
 *	- start - virtual start address of region
 *	- end   - virtual end address of region
 */
v6_dma_clean_range:
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1	@ align start down to a D-cache line
1:
#ifdef CONFIG_DMA_CACHE_RWFO
	ldr	r2, [r0]			@ read for ownership
#endif
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D line
#else
	mcr	p15, 0, r0, c7, c11, 1		@ clean unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr
|
|
|
|
/*
 *	v6_dma_flush_range(start,end)
 *
 *	Clean & invalidate the data cache within the specified region.
 *
 *	With CONFIG_DMA_CACHE_RWFO, each line is read and written first so
 *	this CPU gains ownership of it before the maintenance operation.
 *
 *	- start - virtual start address of region
 *	- end   - virtual end address of region
 */
ENTRY(v6_dma_flush_range)
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrb	r2, [r0]			@ read for ownership
	strb	r2, [r0]			@ write for ownership
#endif
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1	@ align start down to a D-cache line
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
#else
	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrlob	r2, [r0]			@ read for ownership
	strlob	r2, [r0]			@ write for ownership
#endif
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr
|
|
|
|
/*
 *	v6_dma_map_area(start, size, dir)
 *
 *	Prepare a region for a DMA transfer, dispatching to the
 *	appropriate maintenance routine for the transfer direction.
 *
 *	- start - kernel virtual start address
 *	- size  - size of region
 *	- dir   - DMA direction
 */
ENTRY(v6_dma_map_area)
	add	r1, r1, r0			@ r1 = end address
	teq	r2, #DMA_FROM_DEVICE
	beq	v6_dma_inv_range		@ device writes: invalidate stale lines
#ifndef CONFIG_DMA_CACHE_RWFO
	b	v6_dma_clean_range		@ otherwise clean (write back) only
#else
	teq	r2, #DMA_TO_DEVICE
	beq	v6_dma_clean_range
	b	v6_dma_flush_range		@ bidirectional: clean & invalidate
#endif
ENDPROC(v6_dma_map_area)
|
|
|
|
/*
 *	v6_dma_unmap_area(start, size, dir)
 *
 *	Finish a DMA transfer on a region.  Without CONFIG_DMA_CACHE_RWFO,
 *	any direction other than DMA_TO_DEVICE requires invalidating lines
 *	speculatively fetched while the device owned the buffer; otherwise
 *	nothing remains to be done.
 *
 *	- start - kernel virtual start address
 *	- size  - size of region
 *	- dir   - DMA direction
 */
ENTRY(v6_dma_unmap_area)
#ifndef CONFIG_DMA_CACHE_RWFO
	add	r1, r1, r0			@ r1 = end address
	teq	r2, #DMA_TO_DEVICE
	bne	v6_dma_inv_range		@ tail-call; returns to caller
#endif
	ret	lr
ENDPROC(v6_dma_unmap_area)
|
|
|
|
	/*
	 * v6_flush_kern_cache_louis is simply an alias for
	 * v6_flush_kern_cache_all (flush to the Level of Unification
	 * Inner Shareable is the full flush here).
	 */
	.globl	v6_flush_kern_cache_louis
	.equ	v6_flush_kern_cache_louis, v6_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v6
|