2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* linux/arch/arm/kernel/head.S
|
|
|
|
*
|
|
|
|
* Copyright (C) 1994-2002 Russell King
|
2005-06-18 08:33:31 +00:00
|
|
|
* Copyright (c) 2003 ARM Limited
|
|
|
|
* All Rights Reserved
|
2005-04-16 22:20:36 +00:00
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*
|
|
|
|
* Kernel startup code for all 32-bit CPUs
|
|
|
|
*/
|
|
|
|
#include <linux/linkage.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
|
|
|
|
#include <asm/assembler.h>
|
|
|
|
#include <asm/domain.h>
|
|
|
|
#include <asm/ptrace.h>
|
2005-09-09 19:08:59 +00:00
|
|
|
#include <asm/asm-offsets.h>
|
2005-10-29 20:44:55 +00:00
|
|
|
#include <asm/memory.h>
|
2005-05-05 12:11:00 +00:00
|
|
|
#include <asm/thread_info.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <asm/system.h>
|
|
|
|
|
2010-07-07 03:19:48 +00:00
|
|
|
#ifdef CONFIG_DEBUG_LL
|
|
|
|
#include <mach/debug-macro.S>
|
|
|
|
#endif
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
2005-10-29 20:44:56 +00:00
|
|
|
* swapper_pg_dir is the virtual address of the initial page table.
|
2006-12-11 22:29:16 +00:00
|
|
|
* We place the page tables 16K below KERNEL_RAM_VADDR. Therefore, we must
|
|
|
|
* make sure that KERNEL_RAM_VADDR is correctly set. Currently, we expect
|
2005-10-29 20:44:56 +00:00
|
|
|
* the least significant 16 bits to be 0x8000, but we could probably
|
2006-12-11 22:29:16 +00:00
|
|
|
* relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
ARM: P2V: eliminate head.S use of PHYS_OFFSET for !XIP_KERNEL
head.S makes use of PHYS_OFFSET. When it becomes a variable, the
assembler won't understand this. Compute PHYS_OFFSET by the following
method. This code is linked at its virtual address, but runs before
the MMU is enabled, so at its physical address.
1: .long .
.long PAGE_OFFSET
adr r0, 1b @ r0 = physical ','
ldmia r0, {r1, r2} @ r1 = virtual '.', r2 = PAGE_OFFSET
sub r1, r0, r1 @ r1 = physical-virtual
add r2, r2, r1 @ r2 = PAGE_OFFSET + physical-virtual
@ := PHYS_OFFSET.
Switch XIP users of PHYS_OFFSET to use PLAT_PHYS_OFFSET - we can't
use this method for XIP kernels as the code doesn't execute in RAM.
Tested-by: Tony Lindgren <tony@atomide.com>
Reviewed-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-04 19:04:00 +00:00
|
|
|
#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)
|
2006-12-11 22:29:16 +00:00
|
|
|
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
|
|
|
|
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
|
2005-04-16 22:20:36 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
@ swapper_pg_dir: virtual address of the initial (swapper) page table.
@ It is placed 16KiB (0x4000) below KERNEL_RAM_VADDR, i.e. immediately
@ below the kernel image, as described in the comment block above.
.globl swapper_pg_dir
|
2006-12-11 22:29:16 +00:00
|
|
|
.equ swapper_pg_dir, KERNEL_RAM_VADDR - 0x4000
|
2005-04-16 22:20:36 +00:00
|
|
|
|
ARM: P2V: eliminate head.S use of PHYS_OFFSET for !XIP_KERNEL
head.S makes use of PHYS_OFFSET. When it becomes a variable, the
assembler won't understand this. Compute PHYS_OFFSET by the following
method. This code is linked at its virtual address, but runs before
the MMU is enabled, so at its physical address.
1: .long .
.long PAGE_OFFSET
adr r0, 1b @ r0 = physical ','
ldmia r0, {r1, r2} @ r1 = virtual '.', r2 = PAGE_OFFSET
sub r1, r0, r1 @ r1 = physical-virtual
add r2, r2, r1 @ r2 = PAGE_OFFSET + physical-virtual
@ := PHYS_OFFSET.
Switch XIP users of PHYS_OFFSET to use PLAT_PHYS_OFFSET - we can't
use this method for XIP kernels as the code doesn't execute in RAM.
Tested-by: Tony Lindgren <tony@atomide.com>
Reviewed-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-04 19:04:00 +00:00
|
|
|
@ pgtbl rd, phys: compute the PHYSICAL address of the initial page
@ table into \rd, given the physical start of RAM in \phys.  The
@ table lives 16KiB (0x4000) below the kernel image (phys + TEXT_OFFSET),
@ mirroring the virtual-side layout of swapper_pg_dir above.
.macro pgtbl, rd, phys
|
|
|
|
add \rd, \phys, #TEXT_OFFSET - 0x4000 @ \rd = \phys + TEXT_OFFSET - 16K
|
2005-04-16 22:20:36 +00:00
|
|
|
.endm
|
|
|
|
|
2005-10-29 20:44:56 +00:00
|
|
|
#ifdef CONFIG_XIP_KERNEL
|
2007-02-22 15:18:09 +00:00
|
|
|
#define KERNEL_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
|
|
|
|
#define KERNEL_END _edata_loc
|
2005-10-29 20:44:56 +00:00
|
|
|
#else
|
2007-02-22 15:18:09 +00:00
|
|
|
#define KERNEL_START KERNEL_RAM_VADDR
|
|
|
|
#define KERNEL_END _end
|
2005-04-16 22:20:36 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Kernel startup entry point.
|
|
|
|
* ---------------------------
|
|
|
|
*
|
|
|
|
* This is normally called from the decompressor code. The requirements
|
|
|
|
* are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
|
2007-05-31 21:02:22 +00:00
|
|
|
* r1 = machine nr, r2 = atags pointer.
|
2005-04-16 22:20:36 +00:00
|
|
|
*
|
|
|
|
* This code is mostly position independent, so if you link the kernel at
|
|
|
|
* 0xc0008000, you call this at __pa(0xc0008000).
|
|
|
|
*
|
|
|
|
* See linux/arch/arm/tools/mach-types for the complete list of machine
|
|
|
|
* numbers for r1.
|
|
|
|
*
|
|
|
|
* We're trying to keep crap to a minimum; DO NOT add any machine specific
|
|
|
|
* crap here - that's what the boot loader (or in extreme, well justified
|
|
|
|
* circumstances, zImage) is for.
|
|
|
|
*/
|
2009-10-02 20:32:46 +00:00
|
|
|
__HEAD
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * stext: primary CPU kernel entry point.  Entry conditions are given
 * in the block comment above: MMU off, D-cache off, r0 = 0,
 * r1 = machine nr, r2 = atags pointer; called at the kernel's
 * physical load address.
 */
ENTRY(stext)
|
2009-07-24 11:32:54 +00:00
|
|
|
setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
|
2005-04-16 22:20:36 +00:00
|
|
|
@ and irqs disabled
|
2006-02-24 21:04:56 +00:00
|
|
|
mrc p15, 0, r9, c0, c0 @ get processor id
|
2005-04-16 22:20:36 +00:00
|
|
|
bl __lookup_processor_type @ r5=procinfo r9=cpuid
|
|
|
|
movs r10, r5 @ invalid processor (r5=0)?
|
2010-11-29 18:43:28 +00:00
|
|
|
THUMB( it eq ) @ force fixup-able long branch encoding
|
2005-11-25 15:43:22 +00:00
|
|
|
beq __error_p @ yes, error 'p'
|
2010-11-22 12:06:28 +00:00
|
|
|
|
ARM: P2V: eliminate head.S use of PHYS_OFFSET for !XIP_KERNEL
head.S makes use of PHYS_OFFSET. When it becomes a variable, the
assembler won't understand this. Compute PHYS_OFFSET by the following
method. This code is linked at its virtual address, but run at before
the MMU is enabled, so at his physical address.
1: .long .
.long PAGE_OFFSET
adr r0, 1b @ r0 = physical ','
ldmia r0, {r1, r2} @ r1 = virtual '.', r2 = PAGE_OFFSET
sub r1, r0, r1 @ r1 = physical-virtual
add r2, r2, r1 @ r2 = PAGE_OFFSET + physical-virtual
@ := PHYS_OFFSET.
Switch XIP users of PHYS_OFFSET to use PLAT_PHYS_OFFSET - we can't
use this method for XIP kernels as the code doesn't execute in RAM.
Tested-by: Tony Lindgren <tony@atomide.com>
Reviewed-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-04 19:04:00 +00:00
|
|
|
#ifndef CONFIG_XIP_KERNEL
|
|
|
|
adr r3, 2f @ r3 = physical address of '2f' below
|
|
|
|
ldmia r3, {r4, r8} @ r4 = virtual '2f', r8 = PAGE_OFFSET
|
|
|
|
sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET)
|
|
|
|
add r8, r8, r4 @ PHYS_OFFSET
|
|
|
|
#else
|
|
|
|
ldr r8, =PLAT_PHYS_OFFSET @ XIP runs from ROM: delta trick unusable
|
|
|
|
#endif
|
|
|
|
|
2010-11-22 12:06:28 +00:00
|
|
|
/*
|
|
|
|
* r1 = machine no, r2 = atags,
|
ARM: P2V: eliminate head.S use of PHYS_OFFSET for !XIP_KERNEL
head.S makes use of PHYS_OFFSET. When it becomes a variable, the
assembler won't understand this. Compute PHYS_OFFSET by the following
method. This code is linked at its virtual address, but run at before
the MMU is enabled, so at his physical address.
1: .long .
.long PAGE_OFFSET
adr r0, 1b @ r0 = physical ','
ldmia r0, {r1, r2} @ r1 = virtual '.', r2 = PAGE_OFFSET
sub r1, r0, r1 @ r1 = physical-virtual
add r2, r2, r1 @ r2 = PAGE_OFFSET + physical-virtual
@ := PHYS_OFFSET.
Switch XIP users of PHYS_OFFSET to use PLAT_PHYS_OFFSET - we can't
use this method for XIP kernels as the code doesn't execute in RAM.
Tested-by: Tony Lindgren <tony@atomide.com>
Reviewed-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-04 19:04:00 +00:00
|
|
|
* r8 = phys_offset, r9 = cpuid, r10 = procinfo
|
2010-11-22 12:06:28 +00:00
|
|
|
*/
|
2007-05-31 21:02:22 +00:00
|
|
|
bl __vet_atags @ validate the atags list in r2
|
2010-09-04 09:47:48 +00:00
|
|
|
#ifdef CONFIG_SMP_ON_UP
|
|
|
|
bl __fixup_smp @ patch SMP code when running on UP
|
ARM: P2V: introduce phys_to_virt/virt_to_phys runtime patching
This idea came from Nicolas, Eric Miao produced an initial version,
which was then rewritten into this.
Patch the physical to virtual translations at runtime. As we modify
the code, this makes it incompatible with XIP kernels, but allows us
to achieve this with minimal loss of performance.
As many translations are of the form:
physical = virtual + (PHYS_OFFSET - PAGE_OFFSET)
virtual = physical - (PHYS_OFFSET - PAGE_OFFSET)
we generate an 'add' instruction for __virt_to_phys(), and a 'sub'
instruction for __phys_to_virt(). We calculate at run time (PHYS_OFFSET
- PAGE_OFFSET) by comparing the address prior to MMU initialization with
where it should be once the MMU has been initialized, and place this
constant into the above add/sub instructions.
Once we have (PHYS_OFFSET - PAGE_OFFSET), we can calculate the real
PHYS_OFFSET as PAGE_OFFSET is a build-time constant, and save this for
the C-mode PHYS_OFFSET variable definition to use.
At present, we are unable to support Realview with Sparsemem enabled
as this uses a complex mapping function, and MSM as this requires a
constant which will not fit in our math instruction.
Add a module version magic string for this feature to prevent
incompatible modules being loaded.
Tested-by: Tony Lindgren <tony@atomide.com>
Reviewed-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Tested-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-04 19:09:43 +00:00
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
|
|
|
|
bl __fixup_pv_table @ patch phys<->virt translation stubs
|
2010-09-04 09:47:48 +00:00
|
|
|
#endif
|
2005-04-16 22:20:36 +00:00
|
|
|
bl __create_page_tables @ r4 = phys page table on return
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The following calls CPU specific code in a position independent
|
|
|
|
* manner. See arch/arm/mm/proc-*.S for details. r10 = base of
|
2011-01-12 17:50:42 +00:00
|
|
|
* xxx_proc_info structure selected by __lookup_processor_type
|
2005-04-16 22:20:36 +00:00
|
|
|
* above. On return, the CPU will be ready for the MMU to be
|
|
|
|
* turned on, and r0 will hold the CPU control register value.
|
|
|
|
*/
|
2010-10-04 15:22:34 +00:00
|
|
|
ldr r13, =__mmap_switched @ address to jump to after
|
2005-04-16 22:20:36 +00:00
|
|
|
@ mmu has been enabled
|
2010-10-04 16:56:13 +00:00
|
|
|
adr lr, BSYM(1f) @ return (PIC) address
|
2009-07-24 11:32:54 +00:00
|
|
|
ARM( add pc, r10, #PROCINFO_INITFUNC )
|
|
|
|
THUMB( add r12, r10, #PROCINFO_INITFUNC )
|
|
|
|
THUMB( mov pc, r12 )
|
2010-10-04 16:56:13 +00:00
|
|
|
1: b __enable_mmu
|
2008-08-28 10:22:32 +00:00
|
|
|
ENDPROC(stext)
|
2010-10-04 15:22:34 +00:00
|
|
|
@ Literal pool for stext, plus the '2:' anchor used to compute
@ PHYS_OFFSET at run time (physical '2f' minus virtual '2f' gives the
@ phys-virt delta; adding it to PAGE_OFFSET yields PHYS_OFFSET).
.ltorg
|
ARM: P2V: eliminate head.S use of PHYS_OFFSET for !XIP_KERNEL
head.S makes use of PHYS_OFFSET. When it becomes a variable, the
assembler won't understand this. Compute PHYS_OFFSET by the following
method. This code is linked at its virtual address, but run at before
the MMU is enabled, so at his physical address.
1: .long .
.long PAGE_OFFSET
adr r0, 1b @ r0 = physical ','
ldmia r0, {r1, r2} @ r1 = virtual '.', r2 = PAGE_OFFSET
sub r1, r0, r1 @ r1 = physical-virtual
add r2, r2, r1 @ r2 = PAGE_OFFSET + physical-virtual
@ := PHYS_OFFSET.
Switch XIP users of PHYS_OFFSET to use PLAT_PHYS_OFFSET - we can't
use this method for XIP kernels as the code doesn't execute in RAM.
Tested-by: Tony Lindgren <tony@atomide.com>
Reviewed-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-04 19:04:00 +00:00
|
|
|
#ifndef CONFIG_XIP_KERNEL
|
|
|
|
2: .long . @ link-time (virtual) address of this word
|
|
|
|
.long PAGE_OFFSET
|
|
|
|
#endif
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Setup the initial page tables. We only setup the barest
|
|
|
|
* amount which are required to get the kernel running, which
|
|
|
|
* generally means mapping in the kernel code.
|
|
|
|
*
|
ARM: P2V: eliminate head.S use of PHYS_OFFSET for !XIP_KERNEL
head.S makes use of PHYS_OFFSET. When it becomes a variable, the
assembler won't understand this. Compute PHYS_OFFSET by the following
method. This code is linked at its virtual address, but run at before
the MMU is enabled, so at his physical address.
1: .long .
.long PAGE_OFFSET
adr r0, 1b @ r0 = physical ','
ldmia r0, {r1, r2} @ r1 = virtual '.', r2 = PAGE_OFFSET
sub r1, r0, r1 @ r1 = physical-virtual
add r2, r2, r1 @ r2 = PAGE_OFFSET + physical-virtual
@ := PHYS_OFFSET.
Switch XIP users of PHYS_OFFSET to use PLAT_PHYS_OFFSET - we can't
use this method for XIP kernels as the code doesn't execute in RAM.
Tested-by: Tony Lindgren <tony@atomide.com>
Reviewed-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-04 19:04:00 +00:00
|
|
|
* r8 = phys_offset, r9 = cpuid, r10 = procinfo
|
2005-04-16 22:20:36 +00:00
|
|
|
*
|
|
|
|
* Returns:
|
2010-10-04 16:51:54 +00:00
|
|
|
* r0, r3, r5-r7 corrupted
|
2005-04-16 22:20:36 +00:00
|
|
|
* r4 = physical page table address
|
|
|
|
*/
|
|
|
|
/*
 * __create_page_tables: build the initial 16K first-level page table.
 * Per the comment block above: r8 = phys_offset, r9 = cpuid,
 * r10 = procinfo on entry; returns r4 = physical page table address
 * with r0, r3, r5-r7 corrupted.
 */
__create_page_tables:
|
ARM: P2V: eliminate head.S use of PHYS_OFFSET for !XIP_KERNEL
head.S makes use of PHYS_OFFSET. When it becomes a variable, the
assembler won't understand this. Compute PHYS_OFFSET by the following
method. This code is linked at its virtual address, but run at before
the MMU is enabled, so at his physical address.
1: .long .
.long PAGE_OFFSET
adr r0, 1b @ r0 = physical ','
ldmia r0, {r1, r2} @ r1 = virtual '.', r2 = PAGE_OFFSET
sub r1, r0, r1 @ r1 = physical-virtual
add r2, r2, r1 @ r2 = PAGE_OFFSET + physical-virtual
@ := PHYS_OFFSET.
Switch XIP users of PHYS_OFFSET to use PLAT_PHYS_OFFSET - we can't
use this method for XIP kernels as the code doesn't execute in RAM.
Tested-by: Tony Lindgren <tony@atomide.com>
Reviewed-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-04 19:04:00 +00:00
|
|
|
pgtbl r4, r8 @ page table address
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Clear the 16K level 1 swapper page table
|
|
|
|
*/
|
|
|
|
mov r0, r4 @ r0 = table start
|
|
|
|
mov r3, #0
|
|
|
|
add r6, r0, #0x4000 @ r6 = table end (16KiB)
|
|
|
|
1: str r3, [r0], #4
|
|
|
|
str r3, [r0], #4
|
|
|
|
str r3, [r0], #4
|
|
|
|
str r3, [r0], #4
|
|
|
|
teq r0, r6
|
|
|
|
bne 1b
|
|
|
|
|
2006-06-29 17:24:21 +00:00
|
|
|
ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
|
2010-10-04 16:51:54 +00:00
|
|
|
* Create identity mapping to cater for __enable_mmu.
|
|
|
|
* This identity mapping will be removed by paging_init().
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2010-10-04 16:51:54 +00:00
|
|
|
adr r0, __enable_mmu_loc
|
|
|
|
ldmia r0, {r3, r5, r6}
|
|
|
|
sub r0, r0, r3 @ virt->phys offset
|
|
|
|
add r5, r5, r0 @ phys __enable_mmu
|
|
|
|
add r6, r6, r0 @ phys __enable_mmu_end
|
|
|
|
mov r5, r5, lsr #20
|
|
|
|
mov r6, r6, lsr #20
|
|
|
|
|
|
|
|
1: orr r3, r7, r5, lsl #20 @ flags + kernel base
|
|
|
|
str r3, [r4, r5, lsl #2] @ identity mapping
|
|
|
|
teq r5, r6
|
|
|
|
addne r5, r5, #1 @ next section
|
|
|
|
bne 1b
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Now setup the pagetables for our kernel direct
|
2006-09-29 20:14:05 +00:00
|
|
|
* mapped region.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2010-10-04 16:51:54 +00:00
|
|
|
mov r3, pc @ r3 = physical address of this code
|
|
|
|
mov r3, r3, lsr #20 @ r3 = current 1MB section index
|
|
|
|
orr r3, r7, r3, lsl #20 @ section entry: flags + section base
|
2007-02-22 15:18:09 +00:00
|
|
|
add r0, r4, #(KERNEL_START & 0xff000000) >> 18
|
|
|
|
str r3, [r0, #(KERNEL_START & 0x00f00000) >> 18]!
|
|
|
|
ldr r6, =(KERNEL_END - 1)
|
|
|
|
add r0, r0, #4
|
|
|
|
add r6, r4, r6, lsr #18
|
|
|
|
1: cmp r0, r6
|
|
|
|
add r3, r3, #1 << 20
|
|
|
|
strls r3, [r0], #4
|
|
|
|
bls 1b
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2007-02-21 14:32:28 +00:00
|
|
|
#ifdef CONFIG_XIP_KERNEL
|
|
|
|
/*
|
|
|
|
* Map some ram to cover our .data and .bss areas.
|
|
|
|
*/
|
ARM: P2V: eliminate head.S use of PHYS_OFFSET for !XIP_KERNEL
head.S makes use of PHYS_OFFSET. When it becomes a variable, the
assembler won't understand this. Compute PHYS_OFFSET by the following
method. This code is linked at its virtual address, but run at before
the MMU is enabled, so at his physical address.
1: .long .
.long PAGE_OFFSET
adr r0, 1b @ r0 = physical ','
ldmia r0, {r1, r2} @ r1 = virtual '.', r2 = PAGE_OFFSET
sub r1, r0, r1 @ r1 = physical-virtual
add r2, r2, r1 @ r2 = PAGE_OFFSET + physical-virtual
@ := PHYS_OFFSET.
Switch XIP users of PHYS_OFFSET to use PLAT_PHYS_OFFSET - we can't
use this method for XIP kernels as the code doesn't execute in RAM.
Tested-by: Tony Lindgren <tony@atomide.com>
Reviewed-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-04 19:04:00 +00:00
|
|
|
add r3, r8, #TEXT_OFFSET
|
|
|
|
orr r3, r3, r7
|
2007-02-21 14:32:28 +00:00
|
|
|
add r0, r4, #(KERNEL_RAM_VADDR & 0xff000000) >> 18
|
|
|
|
str r3, [r0, #(KERNEL_RAM_VADDR & 0x00f00000) >> 18]!
|
|
|
|
ldr r6, =(_end - 1)
|
|
|
|
add r0, r0, #4
|
|
|
|
add r6, r4, r6, lsr #18
|
|
|
|
1: cmp r0, r6
|
|
|
|
add r3, r3, #1 << 20
|
|
|
|
strls r3, [r0], #4
|
|
|
|
bls 1b
|
|
|
|
#endif
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
2011-02-02 15:33:17 +00:00
|
|
|
* Then map boot params address in r2 or
|
|
|
|
* the first 1MB of ram if boot params address is not specified.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2011-02-02 15:33:17 +00:00
|
|
|
mov r0, r2, lsr #20 @ round atags pointer down to 1MB
|
|
|
|
movs r0, r0, lsl #20
|
|
|
|
moveq r0, r8 @ no atags: use start of RAM
|
|
|
|
sub r3, r0, r8
|
|
|
|
add r3, r3, #PAGE_OFFSET
|
|
|
|
add r3, r4, r3, lsr #18
|
|
|
|
orr r6, r7, r0
|
|
|
|
str r6, [r3]
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2005-07-01 10:56:55 +00:00
|
|
|
#ifdef CONFIG_DEBUG_LL
|
2010-07-07 03:19:48 +00:00
|
|
|
#ifndef CONFIG_DEBUG_ICEDCC
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* Map in IO space for serial debugging.
|
|
|
|
* This allows debug messages to be output
|
|
|
|
* via a serial console before paging_init.
|
|
|
|
*/
|
2010-07-07 03:19:48 +00:00
|
|
|
addruart r7, r3
|
|
|
|
|
|
|
|
mov r3, r3, lsr #20
|
|
|
|
mov r3, r3, lsl #2
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
add r0, r4, r3
|
|
|
|
rsb r3, r3, #0x4000 @ PTRS_PER_PGD*sizeof(long)
|
|
|
|
cmp r3, #0x0800 @ limit to 512MB
|
|
|
|
movhi r3, #0x0800
|
|
|
|
add r6, r0, r3
|
2010-07-07 03:19:48 +00:00
|
|
|
mov r3, r7, lsr #20
|
|
|
|
ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
|
|
|
|
orr r3, r7, r3, lsl #20
|
2005-04-16 22:20:36 +00:00
|
|
|
1: str r3, [r0], #4
|
|
|
|
add r3, r3, #1 << 20
|
|
|
|
teq r0, r6
|
|
|
|
bne 1b
|
2010-07-07 03:19:48 +00:00
|
|
|
|
|
|
|
#else /* CONFIG_DEBUG_ICEDCC */
|
|
|
|
/* we don't need any serial debugging mappings for ICEDCC */
|
|
|
|
ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
|
|
|
|
#endif /* !CONFIG_DEBUG_ICEDCC */
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
|
|
|
|
/*
|
2005-11-25 15:43:22 +00:00
|
|
|
* If we're using the NetWinder or CATS, we also need to map
|
|
|
|
* in the 16550-type serial port for the debug messages
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2005-07-01 10:56:55 +00:00
|
|
|
add r0, r4, #0xff000000 >> 18
|
|
|
|
orr r3, r7, #0x7c000000
|
|
|
|
str r3, [r0]
|
2005-04-16 22:20:36 +00:00
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_ARCH_RPC
|
|
|
|
/*
|
|
|
|
* Map in screen at 0x02000000 & SCREEN2_BASE
|
|
|
|
* Similar reasons here - for debug. This is
|
|
|
|
* only for Acorn RiscPC architectures.
|
|
|
|
*/
|
2005-07-01 10:56:55 +00:00
|
|
|
add r0, r4, #0x02000000 >> 18
|
|
|
|
orr r3, r7, #0x02000000
|
2005-04-16 22:20:36 +00:00
|
|
|
str r3, [r0]
|
2005-07-01 10:56:55 +00:00
|
|
|
add r0, r4, #0xd8000000 >> 18
|
2005-04-16 22:20:36 +00:00
|
|
|
str r3, [r0]
|
2005-07-01 10:56:55 +00:00
|
|
|
#endif
|
2005-04-16 22:20:36 +00:00
|
|
|
#endif
|
|
|
|
mov pc, lr @ done - return to caller (stext)
|
2008-08-28 10:22:32 +00:00
|
|
|
ENDPROC(__create_page_tables)
|
2005-04-16 22:20:36 +00:00
|
|
|
@ Literal pool for __create_page_tables, followed by the anchor words
@ used to find the physical extent of the __enable_mmu..__enable_mmu_end
@ region for the identity mapping.
.ltorg
|
2010-11-29 18:43:24 +00:00
|
|
|
.align
|
2010-10-04 16:51:54 +00:00
|
|
|
__enable_mmu_loc:
|
|
|
|
.long . @ link-time address of this word (virt anchor)
|
|
|
|
.long __enable_mmu
|
|
|
|
.long __enable_mmu_end
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2010-10-04 16:56:13 +00:00
|
|
|
#if defined(CONFIG_SMP)
|
|
|
|
__CPUINIT
|
|
|
|
/*
 * secondary_startup: entry point for secondary CPUs brought up via
 * __cpu_up.  Uses the page tables published in __secondary_data
 * rather than building its own.
 */
ENTRY(secondary_startup)
|
|
|
|
/*
|
|
|
|
* Common entry point for secondary CPUs.
|
|
|
|
*
|
|
|
|
* Ensure that we're in SVC mode, and IRQs are disabled. Lookup
|
|
|
|
* the processor type - there is no need to check the machine type
|
|
|
|
* as it has already been validated by the primary processor.
|
|
|
|
*/
|
|
|
|
setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
|
|
|
|
mrc p15, 0, r9, c0, c0 @ get processor id
|
|
|
|
bl __lookup_processor_type @ r5=procinfo r9=cpuid
|
|
|
|
movs r10, r5 @ invalid processor?
|
|
|
|
moveq r0, #'p' @ yes, error 'p'
|
2010-11-29 18:43:28 +00:00
|
|
|
THUMB( it eq ) @ force fixup-able long branch encoding
|
2010-10-04 16:56:13 +00:00
|
|
|
beq __error_p
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Use the page tables supplied from __cpu_up.
|
|
|
|
*/
|
|
|
|
adr r4, __secondary_data @ r4 = physical __secondary_data
|
|
|
|
ldmia r4, {r5, r7, r12} @ address to jump to after
|
|
|
|
sub r4, r4, r5 @ mmu has been enabled
|
|
|
|
ldr r4, [r7, r4] @ get secondary_data.pgdir
|
|
|
|
adr lr, BSYM(__enable_mmu) @ return address
|
|
|
|
mov r13, r12 @ __secondary_switched address
|
|
|
|
ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor
|
|
|
|
@ (return control reg)
|
|
|
|
THUMB( add r12, r10, #PROCINFO_INITFUNC )
|
|
|
|
THUMB( mov pc, r12 )
|
|
|
|
ENDPROC(secondary_startup)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* r7 = &secondary_data (loaded by secondary_startup above; the old
* comment said r6, but the code below reads the stack through r7)
|
|
|
|
*/
|
|
|
|
ENTRY(__secondary_switched)
|
|
|
|
ldr sp, [r7, #4] @ get secondary_data.stack
|
|
|
|
mov fp, #0 @ terminate backtraces for this CPU
|
|
|
|
b secondary_start_kernel
|
|
|
|
ENDPROC(__secondary_switched)
|
|
|
|
|
2010-11-29 18:43:24 +00:00
|
|
|
.align
|
|
|
|
|
2010-10-04 16:56:13 +00:00
|
|
|
@ Anchor words for secondary CPU bring-up: link-time address of this
@ location (for phys/virt delta), &secondary_data, and the virtual
@ address to jump to once the MMU is on.
.type __secondary_data, %object
|
|
|
|
__secondary_data:
|
|
|
|
.long .
|
|
|
|
.long secondary_data
|
|
|
|
.long __secondary_switched
|
|
|
|
#endif /* defined(CONFIG_SMP) */
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Setup common bits before finally enabling the MMU. Essentially
|
|
|
|
* this is just loading the page table pointer and domain access
|
|
|
|
* registers.
|
2010-10-04 17:02:59 +00:00
|
|
|
*
|
|
|
|
* r0 = cp#15 control register
|
|
|
|
* r1 = machine ID
|
|
|
|
* r2 = atags pointer
|
|
|
|
* r4 = page table pointer
|
|
|
|
* r9 = processor ID
|
|
|
|
* r13 = *virtual* address to jump to upon completion
|
2010-10-04 16:56:13 +00:00
|
|
|
*/
|
|
|
|
@ __enable_mmu: see the block comment above for the register contract
@ (r0 = cp15 control register value, r4 = page table pointer,
@ r13 = virtual address to jump to upon completion).
__enable_mmu:
|
|
|
|
#ifdef CONFIG_ALIGNMENT_TRAP
|
|
|
|
orr r0, r0, #CR_A @ enable alignment fault checking
|
|
|
|
#else
|
|
|
|
bic r0, r0, #CR_A
|
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_CPU_DCACHE_DISABLE
|
|
|
|
bic r0, r0, #CR_C @ D-cache off
|
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_CPU_BPREDICT_DISABLE
|
|
|
|
bic r0, r0, #CR_Z @ branch prediction off
|
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_CPU_ICACHE_DISABLE
|
|
|
|
bic r0, r0, #CR_I @ I-cache off
|
|
|
|
#endif
|
|
|
|
mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
|
|
|
|
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
|
|
|
|
domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
|
|
|
|
domain_val(DOMAIN_IO, DOMAIN_CLIENT))
|
|
|
|
mcr p15, 0, r5, c3, c0, 0 @ load domain access register
|
|
|
|
mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
|
|
|
|
b __turn_mmu_on
|
|
|
|
ENDPROC(__enable_mmu)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Enable the MMU. This completely changes the structure of the visible
|
|
|
|
* memory space. You will not be able to trace execution through this.
|
|
|
|
* If you have an enquiry about this, *please* check the linux-arm-kernel
|
|
|
|
* mailing list archives BEFORE sending another post to the list.
|
|
|
|
*
|
|
|
|
* r0 = cp#15 control register
|
2010-10-04 17:02:59 +00:00
|
|
|
* r1 = machine ID
|
|
|
|
* r2 = atags pointer
|
|
|
|
* r9 = processor ID
|
2010-10-04 16:56:13 +00:00
|
|
|
* r13 = *virtual* address to jump to upon completion
|
|
|
|
*
|
|
|
|
* other registers depend on the function called upon completion
|
|
|
|
*/
|
|
|
|
.align 5
|
|
|
|
@ __turn_mmu_on: write the control register to enable the MMU, then
@ jump to the virtual address held in r13 (see comment block above).
@ Cache-line aligned (.align 5) so the critical instructions around
@ the MMU switch sit in a single line.
__turn_mmu_on:
|
|
|
|
mov r0, r0 @ no-op (mov to self)
|
|
|
|
mcr p15, 0, r0, c1, c0, 0 @ write control reg
|
|
|
|
mrc p15, 0, r3, c0, c0, 0 @ read id reg
|
|
|
|
mov r3, r3 @ no-op (mov to self)
|
|
|
|
mov r3, r13 @ r3 = virtual continuation address
|
|
|
|
mov pc, r3
|
|
|
|
__enable_mmu_end:
|
|
|
|
ENDPROC(__turn_mmu_on)
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2010-09-04 09:47:48 +00:00
|
|
|
#ifdef CONFIG_SMP_ON_UP
|
|
|
|
@ __fixup_smp: when an SMP kernel boots on a CPU without the
@ multiprocessing extensions (or a uniprocessor system), rewrite the
@ ALT_SMP alternatives (between __smpalt_begin/__smpalt_end) with
@ their UP replacements.  r9 = cpuid on entry (from stext).
__fixup_smp:
|
2011-01-30 16:40:20 +00:00
|
|
|
and r3, r9, #0x000f0000 @ architecture version
|
|
|
|
teq r3, #0x000f0000 @ CPU ID supported?
|
2010-09-04 09:47:48 +00:00
|
|
|
bne __fixup_smp_on_up @ no, assume UP
|
|
|
|
|
2011-01-30 16:40:20 +00:00
|
|
|
bic r3, r9, #0x00ff0000
|
|
|
|
bic r3, r3, #0x0000000f @ mask 0xff00fff0
|
|
|
|
mov r4, #0x41000000
|
2010-11-22 12:06:28 +00:00
|
|
|
orr r4, r4, #0x0000b000
|
2011-01-30 16:40:20 +00:00
|
|
|
orr r4, r4, #0x00000020 @ val 0x4100b020
|
|
|
|
teq r3, r4 @ ARM 11MPCore?
|
2010-09-04 09:47:48 +00:00
|
|
|
moveq pc, lr @ yes, assume SMP
|
|
|
|
|
|
|
|
mrc p15, 0, r0, c0, c0, 5 @ read MPIDR
|
2011-01-30 16:40:20 +00:00
|
|
|
and r0, r0, #0xc0000000 @ multiprocessing extensions and
|
|
|
|
teq r0, #0x80000000 @ not part of a uniprocessor system?
|
|
|
|
moveq pc, lr @ yes, assume SMP
|
2010-09-04 09:47:48 +00:00
|
|
|
|
@ Patch loop: walk the fixup table (pairs of <address, UP insn>)
@ and overwrite each recorded SMP instruction in place.
__fixup_smp_on_up:
|
|
|
|
adr r0, 1f @ r0 = physical address of '1f' below
|
2010-11-22 12:06:28 +00:00
|
|
|
ldmia r0, {r3 - r5} @ r3 = virt '1f', r4/r5 = table bounds
|
2010-09-04 09:47:48 +00:00
|
|
|
sub r3, r0, r3 @ r3 = phys-virt delta
|
2010-11-22 12:06:28 +00:00
|
|
|
add r4, r4, r3 @ phys __smpalt_begin
|
|
|
|
add r5, r5, r3 @ phys __smpalt_end
|
|
|
|
2: cmp r4, r5
|
ARM: 6516/1: Allow SMP_ON_UP to work with Thumb-2 kernels.
* __fixup_smp_on_up has been modified with support for the
THUMB2_KERNEL case. For THUMB2_KERNEL only, fixups are split
into halfwords in case of misalignment, since we can't rely on
unaligned accesses working before turning the MMU on.
No attempt is made to optimise the aligned case, since the
number of fixups is typically small, and it seems best to keep
the code as simple as possible.
* Add a rotate in the fixup_smp code in order to support
CPU_BIG_ENDIAN, as suggested by Nicolas Pitre.
* Add an assembly-time sanity-check to ALT_UP() to ensure that
the content really is the right size (4 bytes).
(No check is done for ALT_SMP(). Possibly, this could be fixed
by splitting the two uses ot ALT_SMP() (ALT_SMP...SMP_UP versus
ALT_SMP...SMP_UP_B) into two macros. In the first case,
ALT_SMP needs to expand to >= 4 bytes, not == 4.)
* smp_mpidr.h (which implements ALT_SMP()/ALT_UP() manually due
to macro limitations) has not been modified: the affected
instruction (mov) has no 16-bit encoding, so the correct
instruction size is satisfied in this case.
* A "mode" parameter has been added to smp_dmb:
smp_dmb arm @ assumes 4-byte instructions (for ARM code, e.g. kuser)
smp_dmb @ uses W() to ensure 4-byte instructions for ALT_SMP()
This avoids assembly failures due to use of W() inside smp_dmb,
when assembling pure-ARM code in the vectors page.
There might be a better way to achieve this.
* Kconfig: make SMP_ON_UP depend on
(!THUMB2_KERNEL || !BIG_ENDIAN) i.e., THUMB2_KERNEL is now
supported, but only if !BIG_ENDIAN (The fixup code for Thumb-2
currently assumes little-endian order.)
Tested using a single generic realview kernel on:
ARM RealView PB-A8 (CONFIG_THUMB2_KERNEL={n,y})
ARM RealView PBX-A9 (SMP)
Signed-off-by: Dave Martin <dave.martin@linaro.org>
Acked-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2010-12-01 14:39:23 +00:00
|
|
|
movhs pc, lr @ table exhausted - done
|
2010-11-22 12:06:28 +00:00
|
|
|
ldmia r4!, {r0, r6} @ r0 = fixup address, r6 = UP insn
|
ARM: 6516/1: Allow SMP_ON_UP to work with Thumb-2 kernels.
* __fixup_smp_on_up has been modified with support for the
THUMB2_KERNEL case. For THUMB2_KERNEL only, fixups are split
into halfwords in case of misalignment, since we can't rely on
unaligned accesses working before turning the MMU on.
No attempt is made to optimise the aligned case, since the
number of fixups is typically small, and it seems best to keep
the code as simple as possible.
* Add a rotate in the fixup_smp code in order to support
CPU_BIG_ENDIAN, as suggested by Nicolas Pitre.
* Add an assembly-time sanity-check to ALT_UP() to ensure that
the content really is the right size (4 bytes).
(No check is done for ALT_SMP(). Possibly, this could be fixed
by splitting the two uses ot ALT_SMP() (ALT_SMP...SMP_UP versus
ALT_SMP...SMP_UP_B) into two macros. In the first case,
ALT_SMP needs to expand to >= 4 bytes, not == 4.)
* smp_mpidr.h (which implements ALT_SMP()/ALT_UP() manually due
to macro limitations) has not been modified: the affected
instruction (mov) has no 16-bit encoding, so the correct
instruction size is satisfied in this case.
* A "mode" parameter has been added to smp_dmb:
smp_dmb arm @ assumes 4-byte instructions (for ARM code, e.g. kuser)
smp_dmb @ uses W() to ensure 4-byte instructions for ALT_SMP()
This avoids assembly failures due to use of W() inside smp_dmb,
when assembling pure-ARM code in the vectors page.
There might be a better way to achieve this.
* Kconfig: make SMP_ON_UP depend on
(!THUMB2_KERNEL || !BIG_ENDIAN) i.e., THUMB2_KERNEL is now
supported, but only if !BIG_ENDIAN (The fixup code for Thumb-2
currently assumes little-endian order.)
Tested using a single generic realview kernel on:
ARM RealView PB-A8 (CONFIG_THUMB2_KERNEL={n,y})
ARM RealView PBX-A9 (SMP)
Signed-off-by: Dave Martin <dave.martin@linaro.org>
Acked-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2010-12-01 14:39:23 +00:00
|
|
|
ARM( str r6, [r0, r3] )
|
|
|
|
THUMB( add r0, r0, r3 )
|
|
|
|
#ifdef __ARMEB__
|
|
|
|
THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian.
|
|
|
|
#endif
|
|
|
|
THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords
|
|
|
|
THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3.
|
|
|
|
THUMB( strh r6, [r0] )
|
|
|
|
b 2b
|
2010-09-04 09:47:48 +00:00
|
|
|
ENDPROC(__fixup_smp)
|
|
|
|
|
2010-11-29 18:43:24 +00:00
|
|
|
.align
|
2010-09-04 09:47:48 +00:00
|
|
|
@ Anchor + bounds of the ALT_SMP/ALT_UP fixup table consumed by
@ __fixup_smp_on_up above.
1: .word . @ link-time address of this word
|
|
|
|
.word __smpalt_begin
|
|
|
|
.word __smpalt_end
|
|
|
|
|
|
|
|
@ smp_on_up: runtime flag read from C; patched to 0 by the UP fixup.
.pushsection .data
|
|
|
|
.globl smp_on_up
|
|
|
|
smp_on_up:
|
|
|
|
ALT_SMP(.long 1)
|
|
|
|
ALT_UP(.long 0)
|
|
|
|
.popsection
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
ARM: P2V: introduce phys_to_virt/virt_to_phys runtime patching
This idea came from Nicolas, Eric Miao produced an initial version,
which was then rewritten into this.
Patch the physical to virtual translations at runtime. As we modify
the code, this makes it incompatible with XIP kernels, but allows us
to achieve this with minimal loss of performance.
As many translations are of the form:
physical = virtual + (PHYS_OFFSET - PAGE_OFFSET)
virtual = physical - (PHYS_OFFSET - PAGE_OFFSET)
we generate an 'add' instruction for __virt_to_phys(), and a 'sub'
instruction for __phys_to_virt(). We calculate at run time (PHYS_OFFSET
- PAGE_OFFSET) by comparing the address prior to MMU initialization with
where it should be once the MMU has been initialized, and place this
constant into the above add/sub instructions.
Once we have (PHYS_OFFSET - PAGE_OFFSET), we can calculate the real
PHYS_OFFSET as PAGE_OFFSET is a build-time constant, and save this for
the C-mode PHYS_OFFSET variable definition to use.
At present, we are unable to support Realview with Sparsemem enabled
as this uses a complex mapping function, and MSM as this requires a
constant which will not fit in our math instruction.
Add a module version magic string for this feature to prevent
incompatible modules being loaded.
Tested-by: Tony Lindgren <tony@atomide.com>
Reviewed-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Tested-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-04 19:09:43 +00:00
|
|
|
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
|
|
|
|
|
|
|
|
/* __fixup_pv_table - patch the stub instructions with the delta between
 * PHYS_OFFSET and PAGE_OFFSET, which is assumed to be 16MiB aligned
 * (64KiB aligned when CONFIG_ARM_PATCH_PHYS_VIRT_16BIT is enabled) and
 * can be expressed by an immediate shifter operand. The stub instruction
 * has a form of '(add|sub) rd, rn, #imm'.
 *
 * Runs from the physical load address before the MMU is on; must be
 * position-independent.  Expects r8 = PHYS_OFFSET (set up by earlier
 * boot code — the value stored into __pv_phys_offset below).
 * Clobbers r0, r3-r7, ip; branches to __error on bad alignment.
 */
	__HEAD
__fixup_pv_table:
	adr	r0, 1f			@ r0 = run-time (physical) address of 1f
	ldmia	r0, {r3-r5, r7}		@ r3 = link addr of 1f, r4/r5 = table bounds, r7 = &__pv_phys_offset
	sub	r3, r0, r3	@ PHYS_OFFSET - PAGE_OFFSET
	add	r4, r4, r3	@ adjust table start address
	add	r5, r5, r3	@ adjust table end address
	str	r8, [r7, r3]!	@ save computed PHYS_OFFSET to __pv_phys_offset
#ifndef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
	@ Delta must fit an 8-bit immediate rotated into bits 31-24.
	mov	r6, r3, lsr #24	@ constant for add/sub instructions
	teq	r3, r6, lsl #24 @ must be 16MiB aligned
#else
	@ 16-bit variant: delta spans bits 31-16, patched as two bytes.
	mov	r6, r3, lsr #16	@ constant for add/sub instructions
	teq	r3, r6, lsl #16	@ must be 64kiB aligned
#endif
	bne	__error			@ delta not representable - cannot patch
	str	r6, [r7, #4]	@ save to __pv_offset
	b	__fixup_a_pv_table	@ tail-call the patch loop (returns to our caller)
ENDPROC(__fixup_pv_table)
|
|
|
|
|
|
|
|
	.align
	/*
	 * Anchor for __fixup_pv_table: '.long .' holds its own link-time
	 * (virtual) address; comparing it with the run-time 'adr' result
	 * yields PHYS_OFFSET - PAGE_OFFSET.  The following words locate the
	 * patch-site table and the __pv_phys_offset/__pv_offset data pair.
	 */
1:	.long	.
	.long	__pv_table_begin	@ start of pv stub address table
	.long	__pv_table_end		@ end of pv stub address table
2:	.long	__pv_phys_offset	@ __pv_offset lives at this address + 4
|
|
|
|
|
|
|
|
	.text
/*
 * __fixup_a_pv_table - patch every add/sub stub listed in the table.
 *
 * In:	r3 = delta to add to each table entry to get its run-time address
 *	     (0 when called for modules via fixup_pv_table)
 *	r4 = table start, r5 = table end (already delta-adjusted)
 *	r6 = immediate constant (offset >> 24, or >> 16 for the 16BIT case)
 * Clobbers r0, r4, r7, ip.  Returns via lr.
 */
__fixup_a_pv_table:
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
	@ Split the 16-bit constant into the two bytes patched separately.
	and	r0, r6, #255	@ offset bits 23-16
	mov	r6, r6, lsr #8	@ offset bits 31-24
#else
	mov	r0, #0		@ just in case...
#endif
	b	3f			@ enter loop at the termination test
2:	ldr	ip, [r7, r3]		@ ip = stub instruction to patch
	bic	ip, ip, #0x000000ff	@ clear the 8-bit immediate field
	@ Bit 10 is part of the instruction's 4-bit rotate field: set for
	@ rotate-by-8 (immediate lands in bits 31-24), clear for the
	@ bits-23-16 form used by the 16BIT variant.
	tst	ip, #0x400	@ rotate shift tells us LS or MS byte
	orrne	ip, ip, r6	@ mask in offset bits 31-24
	orreq	ip, ip, r0	@ mask in offset bits 23-16
	str	ip, [r7, r3]		@ write the patched instruction back
3:	cmp	r4, r5			@ more table entries?
	ldrcc	r7, [r4], #4	@ use branch for delay slot
	bcc	2b
	mov	pc, lr
ENDPROC(__fixup_a_pv_table)
|
|
|
|
|
|
|
|
/*
 * void fixup_pv_table(void *table, unsigned long size)
 *
 * C-callable entry used at module load time to patch a module's pv
 * stub table.  Runs with the MMU on, so no phys/virt delta is needed
 * (r3 = 0); the patch constant is read from the already-initialized
 * __pv_offset.
 * In:	r0 = table start (virtual), r1 = table size in bytes
 */
ENTRY(fixup_pv_table)
	stmfd	sp!, {r4 - r7, lr}	@ preserve AAPCS callee-saved regs
	ldr	r2, 2f			@ get address of __pv_phys_offset
	mov	r3, #0			@ no offset
	mov	r4, r0			@ r0 = table start
	add	r5, r0, r1		@ r1 = table size
	ldr	r6, [r2, #4]		@ get __pv_offset
	bl	__fixup_a_pv_table
	ldmfd	sp!, {r4 - r7, pc}	@ restore and return
ENDPROC(fixup_pv_table)
|
|
|
|
|
|
|
|
	.align
	/* Literal used by fixup_pv_table above to reach the data pair. */
2:	.long	__pv_phys_offset	@ __pv_offset follows 4 bytes later
|
|
|
|
|
|
|
|
	.data
	/*
	 * __pv_phys_offset - run-time PHYS_OFFSET, stored by
	 * __fixup_pv_table at boot and exported for the C-side
	 * PHYS_OFFSET definition.
	 */
	.globl	__pv_phys_offset
	.type	__pv_phys_offset, %object
__pv_phys_offset:
	.long	0
	.size	__pv_phys_offset, . - __pv_phys_offset
	/*
	 * __pv_offset - the shifted add/sub immediate constant; must stay
	 * immediately after __pv_phys_offset (accessed as [r7, #4] /
	 * [r2, #4] above).
	 */
__pv_offset:
	.long	0
#endif
|
|
|
|
|
2006-03-27 13:58:25 +00:00
|
|
|
#include "head-common.S"
|