/*
 *  linux/arch/arm/kernel/head.S
 *
 *  Copyright (C) 1994-2002 Russell King
 *  Copyright (c) 2003 ARM Limited
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Kernel startup code for all 32-bit CPUs
 */
|
|
|
|
#include <linux/linkage.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
|
|
|
|
#include <asm/assembler.h>
|
|
|
|
#include <asm/domain.h>
|
|
|
|
#include <asm/ptrace.h>
|
2005-09-09 19:08:59 +00:00
|
|
|
#include <asm/asm-offsets.h>
|
2005-10-29 20:44:55 +00:00
|
|
|
#include <asm/memory.h>
|
2005-05-05 12:11:00 +00:00
|
|
|
#include <asm/thread_info.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <asm/system.h>
|
|
|
|
|
2010-07-07 03:19:48 +00:00
|
|
|
#ifdef CONFIG_DEBUG_LL
|
|
|
|
#include <mach/debug-macro.S>
|
|
|
|
#endif
|
|
|
|
|
2007-01-21 19:08:33 +00:00
|
|
|
#if (PHYS_OFFSET & 0x001fffff)
|
|
|
|
#error "PHYS_OFFSET must be at an even 2MiB boundary!"
|
|
|
|
#endif
|
|
|
|
|
2006-12-11 22:29:16 +00:00
|
|
|
#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)
|
|
|
|
#define KERNEL_RAM_PADDR (PHYS_OFFSET + TEXT_OFFSET)
|
2006-01-03 17:28:33 +00:00
|
|
|
|
2007-05-31 21:02:22 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * swapper_pg_dir is the virtual address of the initial page table.
 * We place the page tables 16K below KERNEL_RAM_VADDR.  Therefore, we must
 * make sure that KERNEL_RAM_VADDR is correctly set.  Currently, we expect
 * the least significant 16 bits to be 0x8000, but we could probably
 * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
 */
|
2006-12-11 22:29:16 +00:00
|
|
|
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
|
|
|
|
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
|
2005-04-16 22:20:36 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
.globl swapper_pg_dir
|
2006-12-11 22:29:16 +00:00
|
|
|
.equ swapper_pg_dir, KERNEL_RAM_VADDR - 0x4000
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2005-10-29 20:44:56 +00:00
|
|
|
.macro pgtbl, rd
|
2006-12-11 22:29:16 +00:00
|
|
|
ldr \rd, =(KERNEL_RAM_PADDR - 0x4000)
|
2005-04-16 22:20:36 +00:00
|
|
|
.endm
|
|
|
|
|
2005-10-29 20:44:56 +00:00
|
|
|
#ifdef CONFIG_XIP_KERNEL
|
2007-02-22 15:18:09 +00:00
|
|
|
#define KERNEL_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
|
|
|
|
#define KERNEL_END _edata_loc
|
2005-10-29 20:44:56 +00:00
|
|
|
#else
|
2007-02-22 15:18:09 +00:00
|
|
|
#define KERNEL_START KERNEL_RAM_VADDR
|
|
|
|
#define KERNEL_END _end
|
2005-04-16 22:20:36 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
 * r1 = machine nr, r2 = atags pointer.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 * We're trying to keep crap to a minimum; DO NOT add any machine specific
 * crap here - that's what the boot loader (or in extreme, well justified
 * circumstances, zImage) is for.
 */
|
2009-10-02 20:32:46 +00:00
|
|
|
__HEAD
|
2005-04-16 22:20:36 +00:00
|
|
|
ENTRY(stext)
|
2009-07-24 11:32:54 +00:00
|
|
|
setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
|
2005-04-16 22:20:36 +00:00
|
|
|
@ and irqs disabled
|
2006-02-24 21:04:56 +00:00
|
|
|
mrc p15, 0, r9, c0, c0 @ get processor id
|
2005-04-16 22:20:36 +00:00
|
|
|
bl __lookup_processor_type @ r5=procinfo r9=cpuid
|
|
|
|
movs r10, r5 @ invalid processor (r5=0)?
|
2010-11-29 18:43:28 +00:00
|
|
|
THUMB( it eq ) @ force fixup-able long branch encoding
|
2005-11-25 15:43:22 +00:00
|
|
|
beq __error_p @ yes, error 'p'
|
2005-04-16 22:20:36 +00:00
|
|
|
bl __lookup_machine_type @ r5=machinfo
|
|
|
|
movs r8, r5 @ invalid machine (r5=0)?
|
2010-11-29 18:43:28 +00:00
|
|
|
THUMB( it eq ) @ force fixup-able long branch encoding
|
2005-04-16 22:20:36 +00:00
|
|
|
beq __error_a @ yes, error 'a'
|
2010-11-22 12:06:28 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* r1 = machine no, r2 = atags,
|
|
|
|
* r8 = machinfo, r9 = cpuid, r10 = procinfo
|
|
|
|
*/
|
2007-05-31 21:02:22 +00:00
|
|
|
bl __vet_atags
|
2010-09-04 09:47:48 +00:00
|
|
|
#ifdef CONFIG_SMP_ON_UP
|
|
|
|
bl __fixup_smp
|
|
|
|
#endif
|
2005-04-16 22:20:36 +00:00
|
|
|
bl __create_page_tables
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The following calls CPU specific code in a position independent
|
|
|
|
* manner. See arch/arm/mm/proc-*.S for details. r10 = base of
|
|
|
|
* xxx_proc_info structure selected by __lookup_machine_type
|
|
|
|
* above. On return, the CPU will be ready for the MMU to be
|
|
|
|
* turned on, and r0 will hold the CPU control register value.
|
|
|
|
*/
|
2010-10-04 15:22:34 +00:00
|
|
|
ldr r13, =__mmap_switched @ address to jump to after
|
2005-04-16 22:20:36 +00:00
|
|
|
@ mmu has been enabled
|
2010-10-04 16:56:13 +00:00
|
|
|
adr lr, BSYM(1f) @ return (PIC) address
|
2009-07-24 11:32:54 +00:00
|
|
|
ARM( add pc, r10, #PROCINFO_INITFUNC )
|
|
|
|
THUMB( add r12, r10, #PROCINFO_INITFUNC )
|
|
|
|
THUMB( mov pc, r12 )
|
2010-10-04 16:56:13 +00:00
|
|
|
1: b __enable_mmu
|
2008-08-28 10:22:32 +00:00
|
|
|
ENDPROC(stext)
|
2010-10-04 15:22:34 +00:00
|
|
|
.ltorg
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
 * Setup the initial page tables.  We only setup the barest
 * amount which are required to get the kernel running, which
 * generally means mapping in the kernel code.
 *
 * r8 = machinfo
 * r9 = cpuid
 * r10 = procinfo
 *
 * Returns:
 *  r0, r3, r5-r7 corrupted
 *  r4 = physical page table address
 */
__create_page_tables:
	pgtbl	r4				@ page table address

	/*
	 * Clear the 16K level 1 swapper page table
	 */
	mov	r0, r4
	mov	r3, #0
	add	r6, r0, #0x4000
1:	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	teq	r0, r6
	bne	1b

	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags

	/*
	 * Create identity mapping to cater for __enable_mmu.
	 * This identity mapping will be removed by paging_init().
	 */
	adr	r0, __enable_mmu_loc
	ldmia	r0, {r3, r5, r6}
	sub	r0, r0, r3			@ virt->phys offset
	add	r5, r5, r0			@ phys __enable_mmu
	add	r6, r6, r0			@ phys __enable_mmu_end
	mov	r5, r5, lsr #20			@ convert to section numbers
	mov	r6, r6, lsr #20

1:	orr	r3, r7, r5, lsl #20		@ flags + kernel base
	str	r3, [r4, r5, lsl #2]		@ identity mapping
	teq	r5, r6
	addne	r5, r5, #1			@ next section
	bne	1b

	/*
	 * Now setup the pagetables for our kernel direct
	 * mapped region.
	 */
	mov	r3, pc
	mov	r3, r3, lsr #20
	orr	r3, r7, r3, lsl #20
	add	r0, r4, #(KERNEL_START & 0xff000000) >> 18
	str	r3, [r0, #(KERNEL_START & 0x00f00000) >> 18]!
	ldr	r6, =(KERNEL_END - 1)
	add	r0, r0, #4
	add	r6, r4, r6, lsr #18
1:	cmp	r0, r6
	add	r3, r3, #1 << 20		@ next 1MB section
	strls	r3, [r0], #4
	bls	1b

#ifdef CONFIG_XIP_KERNEL
	/*
	 * Map some ram to cover our .data and .bss areas.
	 */
	orr	r3, r7, #(KERNEL_RAM_PADDR & 0xff000000)
	.if	(KERNEL_RAM_PADDR & 0x00f00000)
	orr	r3, r3, #(KERNEL_RAM_PADDR & 0x00f00000)
	.endif
	add	r0, r4, #(KERNEL_RAM_VADDR & 0xff000000) >> 18
	str	r3, [r0, #(KERNEL_RAM_VADDR & 0x00f00000) >> 18]!
	ldr	r6, =(_end - 1)
	add	r0, r0, #4
	add	r6, r4, r6, lsr #18
1:	cmp	r0, r6
	add	r3, r3, #1 << 20
	strls	r3, [r0], #4
	bls	1b
#endif

	/*
	 * Then map first 1MB of ram in case it contains our boot params.
	 */
	add	r0, r4, #PAGE_OFFSET >> 18
	orr	r6, r7, #(PHYS_OFFSET & 0xff000000)
	.if	(PHYS_OFFSET & 0x00f00000)
	orr	r6, r6, #(PHYS_OFFSET & 0x00f00000)
	.endif
	str	r6, [r0]

#ifdef CONFIG_DEBUG_LL
#ifndef CONFIG_DEBUG_ICEDCC
	/*
	 * Map in IO space for serial debugging.
	 * This allows debug messages to be output
	 * via a serial console before paging_init.
	 */
	addruart r7, r3

	mov	r3, r3, lsr #20
	mov	r3, r3, lsl #2		@ r3 = pgd offset of the UART section

	add	r0, r4, r3
	rsb	r3, r3, #0x4000			@ PTRS_PER_PGD*sizeof(long)
	cmp	r3, #0x0800			@ limit to 512MB
	movhi	r3, #0x0800
	add	r6, r0, r3
	mov	r3, r7, lsr #20
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
	orr	r3, r7, r3, lsl #20
1:	str	r3, [r0], #4
	add	r3, r3, #1 << 20
	teq	r0, r6
	bne	1b

#else /* CONFIG_DEBUG_ICEDCC */
	/* we don't need any serial debugging mappings for ICEDCC */
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
#endif /* !CONFIG_DEBUG_ICEDCC */

#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
	/*
	 * If we're using the NetWinder or CATS, we also need to map
	 * in the 16550-type serial port for the debug messages
	 */
	add	r0, r4, #0xff000000 >> 18
	orr	r3, r7, #0x7c000000
	str	r3, [r0]
#endif
#ifdef CONFIG_ARCH_RPC
	/*
	 * Map in screen at 0x02000000 & SCREEN2_BASE
	 * Similar reasons here - for debug.  This is
	 * only for Acorn RiscPC architectures.
	 */
	add	r0, r4, #0x02000000 >> 18
	orr	r3, r7, #0x02000000
	str	r3, [r0]
	add	r0, r4, #0xd8000000 >> 18
	str	r3, [r0]
#endif
#endif
	mov	pc, lr
ENDPROC(__create_page_tables)
	.ltorg
|
2010-11-29 18:43:24 +00:00
|
|
|
.align
|
2010-10-04 16:51:54 +00:00
|
|
|
__enable_mmu_loc:
|
|
|
|
.long .
|
|
|
|
.long __enable_mmu
|
|
|
|
.long __enable_mmu_end
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2010-10-04 16:56:13 +00:00
|
|
|
#if defined(CONFIG_SMP)
	__CPUINIT
ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 *
	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
	 * the processor type - there is no need to check the machine type
	 * as it has already been validated by the primary processor.
	 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type
	movs	r10, r5				@ invalid processor?
	moveq	r0, #'p'			@ yes, error 'p'
 THUMB(	it	eq )		@ force fixup-able long branch encoding
	beq	__error_p

	/*
	 * Use the page tables supplied from  __cpu_up.
	 */
	adr	r4, __secondary_data
	ldmia	r4, {r5, r7, r12}		@ address to jump to after
	sub	r4, r4, r5			@ mmu has been enabled
	ldr	r4, [r7, r4]			@ get secondary_data.pgdir
	adr	lr, BSYM(__enable_mmu)		@ return address
	mov	r13, r12			@ __secondary_switched address
 ARM(	add	pc, r10, #PROCINFO_INITFUNC	) @ initialise processor
						  @ (return control reg)
 THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
 THUMB(	mov	pc, r12				)
ENDPROC(secondary_startup)

	/*
	 * r6  = &secondary_data
	 */
ENTRY(__secondary_switched)
	ldr	sp, [r7, #4]			@ get secondary_data.stack
	mov	fp, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)

	.align

	.type	__secondary_data, %object
__secondary_data:
	.long	.
	.long	secondary_data
	.long	__secondary_switched
#endif /* defined(CONFIG_SMP) */
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Setup common bits before finally enabling the MMU.  Essentially
 * this is just loading the page table pointer and domain access
 * registers.
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags pointer
 *  r4  = page table pointer
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 */
__enable_mmu:
#ifdef CONFIG_ALIGNMENT_TRAP
	orr	r0, r0, #CR_A
#else
	bic	r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I
#endif
	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
	b	__turn_mmu_on
ENDPROC(__enable_mmu)
|
|
|
|
|
|
|
|
/*
 * Enable the MMU.  This completely changes the structure of the visible
 * memory space.  You will not be able to trace execution through this.
 * If you have an enquiry about this, *please* check the linux-arm-kernel
 * mailing list archives BEFORE sending another post to the list.
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags pointer
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
	.align	5
__turn_mmu_on:
	mov	r0, r0			@ nop: pipeline flush point
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
	mov	r3, r3			@ nops cover the MMU-enable latency
	mov	r3, r13
	mov	pc, r3			@ jump to the *virtual* address in r13
__enable_mmu_end:
ENDPROC(__turn_mmu_on)
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2010-09-04 09:47:48 +00:00
|
|
|
#ifdef CONFIG_SMP_ON_UP
	/*
	 * If this SMP kernel is actually running on a uniprocessor CPU,
	 * patch each ALT_SMP instruction with its ALT_UP alternative.
	 * r9 = cpuid (from stext); lr preserved; returns via mov pc, lr.
	 */
__fixup_smp:
	mov	r4, #0x00070000
	orr	r3, r4, #0xff000000	@ mask 0xff070000
	orr	r4, r4, #0x41000000	@ val 0x41070000
	and	r0, r9, r3
	teq	r0, r4			@ ARM CPU and ARMv6/v7?
	bne	__fixup_smp_on_up	@ no, assume UP

	orr	r3, r3, #0x0000ff00
	orr	r3, r3, #0x000000f0	@ mask 0xff07fff0
	orr	r4, r4, #0x0000b000
	orr	r4, r4, #0x00000020	@ val 0x4107b020
	and	r0, r9, r3
	teq	r0, r4			@ ARM 11MPCore?
	moveq	pc, lr			@ yes, assume SMP

	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR
	tst	r0, #1 << 31
	movne	pc, lr			@ bit 31 => SMP

__fixup_smp_on_up:
	adr	r0, 1f
	ldmia	r0, {r3 - r5}
	sub	r3, r0, r3		@ runtime-to-linktime offset
	add	r4, r4, r3		@ phys __smpalt_begin
	add	r5, r5, r3		@ phys __smpalt_end
2:	cmp	r4, r5
	movhs	pc, lr
	ldmia	r4!, {r0, r6}		@ r0 = insn address, r6 = UP insn
 ARM(	str	r6, [r0, r3]	)
 THUMB(	add	r0, r0, r3	)
#ifdef __ARMEB__
 THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
#endif
 THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
 THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
 THUMB(	strh	r6, [r0]	)
	b	2b
ENDPROC(__fixup_smp)

	.align
1:	.word	.
	.word	__smpalt_begin
	.word	__smpalt_end

	.pushsection .data
	.globl	smp_on_up
smp_on_up:
	ALT_SMP(.long	1)
	ALT_UP(.long	0)
	.popsection

#endif
|
|
|
|
|
2006-03-27 13:58:25 +00:00
|
|
|
#include "head-common.S"
|