/*
 *  linux/arch/arm/mm/arm925.S: MMU functions for ARM925
 *
 *  Copyright (C) 1999,2000 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  Copyright (C) 2002 RidgeRun, Inc.
 *  Copyright (C) 2002-2003 MontaVista Software, Inc.
 *
 *  Update for Linux-2.6 and cache flush improvements
 *  Copyright (C) 2004 Nokia Corporation by Tony Lindgren <tony@atomide.com>
 *
 *  hacked for non-paged-MM by Hyok S. Choi, 2004.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * These are the low-level assembly routines for performing cache and
 * TLB functions on the arm925.
 *
 *  CONFIG_CPU_ARM925_CPU_IDLE -> nohlt
 *
 * Some additional notes based on deciphering the TI TRM on OMAP-5910:
 *
 * NOTE1: The TI925T Configuration Register bit "D-cache clean and flush
 *	  entry mode" must be 0 to flush the entries in both segments
 *	  at once. This is the default value. See TRM 2-20 and 2-24 for
 *	  more information.
 *
 * NOTE2: Default is the "D-cache clean and flush entry mode". It looks
 *	  like the "Transparent mode" must be on for partial cache flushes
 *	  to work in this mode. This mode only works with 16-bit external
 *	  memory. See TRM 2-24 for more information.
 *
 * NOTE3: Write-back cache flushing seems to be flaky with devices using
 *	  direct memory access, such as USB OHCI. The workaround is to use
 *	  write-through cache with CONFIG_CPU_DCACHE_WRITETHROUGH (this is
 *	  the default for OMAP-1510).
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	16

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	2

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	256

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 */
#define CACHE_DLIMIT	8192
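
/*
 * Note: 2 segments x 256 lines x 16 bytes per line is 8192 bytes,
 * i.e. the whole 8 KiB data cache, which is why CACHE_DLIMIT above
 * equals the total cache size.
 */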
	.text
/*
 * cpu_arm925_proc_init()
 */
ENTRY(cpu_arm925_proc_init)
	ret	lr

/*
 * cpu_arm925_proc_fin()
 */
ENTRY(cpu_arm925_proc_fin)
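	/*
	 * Bit 12 of the control register is the I-cache enable; bits
	 * 3..1 are the write buffer, D-cache and alignment enables,
	 * matching the "wca" columns in the comments below.
	 */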
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr

/*
 * cpu_arm925_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_arm925_reset)
	/* Send software reset to MPU and DSP */
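	/*
	 * ip is built up to 0xfffece00 and the halfword store hits
	 * offset 0x10, i.e. 0xfffece10 - on OMAP1 this is the
	 * ARM_RSTCT1 reset control register, and writing 1 asserts
	 * the software reset of both the MPU and the DSP.
	 */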
	mov	ip, #0xff000000
	orr	ip, ip, #0x00fe0000
	orr	ip, ip, #0x0000ce00
	mov	r4, #1
	strh	r4, [ip, #0x10]
ENDPROC(cpu_arm925_reset)
	.popsection

	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0

/*
 * cpu_arm925_do_idle()
 *
 * Called with IRQs disabled
 */
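/*
 * r1 preserves the original control register value; the I-cache is
 * disabled (bit 12 cleared via r2) for the duration of the
 * wait-for-interrupt and re-enabled once an interrupt wakes the core.
 */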
	.align	10
ENTRY(cpu_arm925_do_idle)
	mov	r0, #0
	mrc	p15, 0, r1, c1, c0, 0		@ Read control register
	mcr	p15, 0, r0, c7, c10, 4		@ Drain write buffer
	bic	r2, r1, #1 << 12
	mcr	p15, 0, r2, c1, c0, 0		@ Disable I cache
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	mcr	p15, 0, r1, c1, c0, 0		@ Restore ICache enable
	ret	lr

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(arm925_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(arm925_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular
 *	address space.
 */
ENTRY(arm925_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(arm925_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
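	/*
	 * In the write-back case below, r3 carries the cache line
	 * index in bits [11:4] and is walked from 255 down to 0;
	 * with the "clean and flush entry mode" bit clear (see
	 * NOTE1), each operation covers that line in both segments.
	 */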
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
#else
	/* Flush entries in both segments at once, see NOTE1 above */
	mov	r3, #(CACHE_DENTRIES - 1) << 4	@ 256 entries in segment
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean+invalidate D index
	subs	r3, r3, #1 << 4
	bcs	2b				@ entries 255 to 0
#endif
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the
 *	specified address range.
 *
 *	- start	- start address (inclusive)
 *	- end	- end address (exclusive)
 *	- flags	- vm_flags describing address space
 */
ENTRY(arm925_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bgt	__flush_whole_cache
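	/*
	 * Smaller ranges are done line by line; the loop body is
	 * unrolled twice, so each pass advances r0 by two cache
	 * lines.
	 */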
1:	tst	r2, #VM_EXEC
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
#else
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
#endif
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm925_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm925_coherent_user_range)
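	/*
	 * Dirty D-cache lines are cleaned to memory before the
	 * corresponding I-cache lines are invalidated, so that
	 * instruction fetches see newly written code; r0 is zeroed
	 * at the end as the success return value.
	 */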
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	r0, #0
	ret	lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(arm925_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
arm925_dma_inv_range:
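	/*
	 * Partially covered lines at either end of the range are
	 * cleaned first, so invalidation cannot discard unrelated
	 * dirty data sharing those lines.
	 */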
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	tst	r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
#endif
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
arm925_dma_clean_range:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm925_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
#else
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
#endif
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(arm925_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
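	/*
	 * dir selects the maintenance: DMA_TO_DEVICE (1) cleans,
	 * DMA_FROM_DEVICE (2) takes the bcs path and invalidates,
	 * and DMA_BIDIRECTIONAL (0) falls through to the full
	 * clean+invalidate.
	 */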
	beq	arm925_dma_clean_range
	bcs	arm925_dma_inv_range
	b	arm925_dma_flush_range
ENDPROC(arm925_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(arm925_dma_unmap_area)
	ret	lr
ENDPROC(arm925_dma_unmap_area)

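/*
 * Flushing to the Level of Unification Inner Shareable (LoUIS) is
 * not meaningful on this uniprocessor core, so it simply aliases
 * the full cache flush.
 */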
	.globl	arm925_flush_kern_cache_louis
	.equ	arm925_flush_kern_cache_louis, arm925_flush_kern_cache_all

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions arm925

ENTRY(cpu_arm925_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/* =============================== PageTable ============================== */

/*
 * cpu_arm925_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
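/*
 * The ARM925's caches are virtually indexed and tagged, so the whole
 * D-cache has to be cleaned and both caches invalidated before the
 * translation base changes.
 */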
	.align	5
ENTRY(cpu_arm925_switch_mm)
#ifdef CONFIG_MMU
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
#else
	/* Flush entries in both segments at once, see NOTE1 above */
	mov	r3, #(CACHE_DENTRIES - 1) << 4	@ 256 entries in segment
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean & invalidate D index
	subs	r3, r3, #1 << 4
	bcs	2b				@ entries 255 to 0
#endif
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	ret	lr

/*
 * cpu_arm925_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_arm925_set_pte_ext)
#ifdef CONFIG_MMU
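	/*
	 * armv3_set_pte_ext (see proc-macros.S) stores the Linux view
	 * of the PTE and leaves r0 pointing at the hardware PTE,
	 * which is then cleaned out of the D-cache below.
	 */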
	armv3_set_pte_ext
	mov	r0, r0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif /* CONFIG_MMU */
	ret	lr

	.type	__arm925_setup, #function
__arm925_setup:
	mov	r0, #0
#if defined(CONFIG_CPU_ICACHE_STREAMING_DISABLE)
	orr	r0, r0, #1 << 7
#endif

	/* Transparent on, D-cache clean & flush mode. See NOTE2 above */
	orr	r0, r0, #1 << 1			@ transparent mode on
	mcr	p15, 0, r0, c15, c1, 0		@ write TI config register

	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r0, #4				@ disable write-back on caches explicitly
	mcr	p15, 7, r0, c15, c0, 0
#endif

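	/*
	 * arm925_crval supplies a clear mask (loaded into r5) and a
	 * set mask (r6); the adjusted control register value is
	 * returned in r0 for head.S to write when it enables the MMU.
	 */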
	adr	r5, arm925_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
	orr	r0, r0, #0x4000			@ .1.. .... .... ....
#endif
	ret	lr
	.size	__arm925_setup, . - __arm925_setup

/*
 *	R
 * .RVI ZFRS BLDP WCAM
 * .011 0001 ..11 1101
 *
 */
	.type	arm925_crval, #object
arm925_crval:
	crval	clear=0x00007f3f, mmuset=0x0000313d, ucset=0x00001130

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm925, dabort=v4t_early_abort, pabort=legacy_pabort

	.section ".rodata"

	string	cpu_arch_name, "armv4t"
	string	cpu_elf_name, "v4"
	string	cpu_arm925_name, "ARM925T"

	.align

	.section ".proc.info.init", #alloc, #execinstr

.macro arm925_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
	.type	__\name\()_proc_info,#object
__\name\()_proc_info:
	.long	\cpu_val
	.long	\cpu_mask
	.long	PMD_TYPE_SECT | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__arm925_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm925_name
	.long	arm925_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	arm925_cache_fns
	.size	__\name\()_proc_info, . - __\name\()_proc_info
.endm

	arm925_proc_info arm925, 0x54029250, 0xfffffff0, cpu_arm925_name
	arm925_proc_info arm915, 0x54029150, 0xfffffff0, cpu_arm925_name
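
/*
 * At boot, the records above are matched against the CPU ID
 * register as (id & cpu_mask) == cpu_val, so these two entries
 * catch the 0x5402925x and 0x5402915x parts respectively.
 */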