/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/assembler.h>
#include <asm/boot.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/virt.h>

#define __PHYS_OFFSET	(KERNEL_START - TEXT_OFFSET)

#if (TEXT_OFFSET & 0xfff) != 0
#error TEXT_OFFSET must be at least 4KB aligned
#elif (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#elif TEXT_OFFSET > 0x1fffff
#error TEXT_OFFSET must be less than 2MB
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address to the FDT blob.
 *
 * This code is mostly position independent so you call this at
 * __pa(PAGE_OFFSET + TEXT_OFFSET).
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	__HEAD
_head:
	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
#ifdef CONFIG_EFI
	/*
	 * This add instruction has no meaningful effect except that
	 * its opcode forms the magic "MZ" signature required by UEFI.
	 */
	add	x13, x18, #0x16
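	/*
	 * (Illustrative note: "add x13, x18, #0x16" assembles to
	 * 0x91005a4d, so the first two bytes of a little-endian image
	 * are 0x4d 0x5a, i.e. ASCII "MZ".)
	 */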
	b	stext
#else
	b	stext				// branch to kernel start, magic
	.long	0				// reserved
#endif
	le64sym	_kernel_offset_le		// Image load offset from start of RAM, little-endian
	le64sym	_kernel_size_le			// Effective size of kernel image, little-endian
	le64sym	_kernel_flags_le		// Informative flags, little-endian
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.byte	0x41				// Magic number, "ARM\x64"
	.byte	0x52
	.byte	0x4d
	.byte	0x64
#ifdef CONFIG_EFI
	.long	pe_header - _head		// Offset to the PE header.
#else
	.word	0				// reserved
#endif

#ifdef CONFIG_EFI
	.align 3
pe_header:
	.ascii	"PE"
	.short	0
coff_header:
	.short	0xaa64				// AArch64
	.short	2				// nr_sections
	.long	0				// TimeDateStamp
	.long	0				// PointerToSymbolTable
	.long	1				// NumberOfSymbols
	.short	section_table - optional_header	// SizeOfOptionalHeader
	.short	0x206				// Characteristics.
						// IMAGE_FILE_DEBUG_STRIPPED |
						// IMAGE_FILE_EXECUTABLE_IMAGE |
						// IMAGE_FILE_LINE_NUMS_STRIPPED
optional_header:
	.short	0x20b				// PE32+ format
	.byte	0x02				// MajorLinkerVersion
	.byte	0x14				// MinorLinkerVersion
	.long	_end - efi_header_end		// SizeOfCode
	.long	0				// SizeOfInitializedData
	.long	0				// SizeOfUninitializedData
	.long	__efistub_entry - _head		// AddressOfEntryPoint
	.long	efi_header_end - _head		// BaseOfCode

extra_header_fields:
	.quad	0				// ImageBase
	.long	0x1000				// SectionAlignment
	.long	PECOFF_FILE_ALIGNMENT		// FileAlignment
	.short	0				// MajorOperatingSystemVersion
	.short	0				// MinorOperatingSystemVersion
	.short	0				// MajorImageVersion
	.short	0				// MinorImageVersion
	.short	0				// MajorSubsystemVersion
	.short	0				// MinorSubsystemVersion
	.long	0				// Win32VersionValue

	.long	_end - _head			// SizeOfImage

	// Everything before the kernel image is considered part of the header
	.long	efi_header_end - _head		// SizeOfHeaders
	.long	0				// CheckSum
	.short	0xa				// Subsystem (EFI application)
	.short	0				// DllCharacteristics
	.quad	0				// SizeOfStackReserve
	.quad	0				// SizeOfStackCommit
	.quad	0				// SizeOfHeapReserve
	.quad	0				// SizeOfHeapCommit
	.long	0				// LoaderFlags
	.long	0x6				// NumberOfRvaAndSizes

	.quad	0				// ExportTable
	.quad	0				// ImportTable
	.quad	0				// ResourceTable
	.quad	0				// ExceptionTable
	.quad	0				// CertificationTable
	.quad	0				// BaseRelocationTable

	// Section table
section_table:

	/*
	 * The EFI application loader requires a relocation section
	 * because EFI applications must be relocatable. This is a
	 * dummy section as far as we are concerned.
	 */
	.ascii	".reloc"
	.byte	0
	.byte	0				// end of 0 padding of section name
	.long	0
	.long	0
	.long	0				// SizeOfRawData
	.long	0				// PointerToRawData
	.long	0				// PointerToRelocations
	.long	0				// PointerToLineNumbers
	.short	0				// NumberOfRelocations
	.short	0				// NumberOfLineNumbers
	.long	0x42100040			// Characteristics (section flags)
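						// (reader aid, per the standard
						// PE/COFF flag encodings:
						// IMAGE_SCN_MEM_READ |
						// IMAGE_SCN_MEM_DISCARDABLE |
						// IMAGE_SCN_ALIGN_1BYTES |
						// IMAGE_SCN_CNT_INITIALIZED_DATA)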

	.ascii	".text"
	.byte	0
	.byte	0
	.byte	0				// end of 0 padding of section name
	.long	_end - efi_header_end		// VirtualSize
	.long	efi_header_end - _head		// VirtualAddress
	.long	_edata - efi_header_end		// SizeOfRawData
	.long	efi_header_end - _head		// PointerToRawData

	.long	0				// PointerToRelocations (0 for executables)
	.long	0				// PointerToLineNumbers (0 for executables)
	.short	0				// NumberOfRelocations  (0 for executables)
	.short	0				// NumberOfLineNumbers  (0 for executables)
	.long	0xe0500020			// Characteristics (section flags)
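						// (reader aid, per the standard
						// PE/COFF flag encodings:
						// IMAGE_SCN_MEM_WRITE |
						// IMAGE_SCN_MEM_READ |
						// IMAGE_SCN_MEM_EXECUTE |
						// IMAGE_SCN_ALIGN_16BYTES |
						// IMAGE_SCN_CNT_CODE)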

	/*
	 * EFI will load .text onwards at the 4k section alignment
	 * described in the PE/COFF header. To ensure that instruction
	 * sequences using an adrp and a :lo12: immediate will function
	 * correctly at this alignment, we must ensure that .text is
	 * placed at a 4k boundary in the Image to begin with.
	 */
	.align 12
efi_header_end:
#endif

	__INIT

ENTRY(stext)
	bl	preserve_boot_args
	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
	adrp	x24, __PHYS_OFFSET
	and	x23, x24, MIN_KIMG_ALIGN - 1	// KASLR offset, defaults to 0
	bl	set_cpu_boot_mode_flag
	bl	__create_page_tables		// x25=TTBR0, x26=TTBR1
	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	bl	__cpu_setup			// initialise processor
	adr_l	x27, __primary_switch		// address to jump to after
						// MMU has been enabled
	b	__enable_mmu
ENDPROC(stext)

/*
 * Preserve the arguments passed by the bootloader in x0 .. x3
 */
preserve_boot_args:
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	dmb	sy				// needed before dc ivac with
						// MMU off

	add	x1, x0, #0x20			// 4 x 8 bytes
	b	__inval_cache_range		// tail call
ENDPROC(preserve_boot_args)

/*
 * Macro to create a table entry to the next page.
 *
 *	tbl:	page table address
 *	virt:	virtual address
 *	shift:	#imm page table shift
 *	ptrs:	#imm pointers per table page
 *
 * Preserves:	virt
 * Corrupts:	tmp1, tmp2
 * Returns:	tbl -> next level table page address
 */
	.macro	create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
	lsr	\tmp1, \virt, #\shift
	and	\tmp1, \tmp1, #\ptrs - 1	// table index
	add	\tmp2, \tbl, #PAGE_SIZE
	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
	str	\tmp2, [\tbl, \tmp1, lsl #3]
	add	\tbl, \tbl, #PAGE_SIZE		// next level table page
	.endm
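
	/*
	 * Worked example (illustrative only; assumes 4KB pages with a
	 * 39-bit VA, i.e. shift == PGDIR_SHIFT == 30 and
	 * ptrs == PTRS_PER_PGD == 512): for virt == 0xffffffc000100000,
	 * the index is (virt >> 30) & 511 == 256, so the descriptor
	 * (tbl + PAGE_SIZE) | PMD_TYPE_TABLE is stored at tbl + 256 * 8,
	 * and tbl is then advanced to the next-level table it just linked.
	 */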

/*
 * Macro to populate the PGD (and possibly PUD) for the corresponding
 * block entry in the next level (tbl) for the given virtual address.
 *
 * Preserves:	virt
 * Corrupts:	tmp1, tmp2
 * Returns:	tbl -> table page that will hold the block entries
 */
	.macro	create_pgd_entry, tbl, virt, tmp1, tmp2
	create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2
#if SWAPPER_PGTABLE_LEVELS > 3
	create_table_entry \tbl, \virt, PUD_SHIFT, PTRS_PER_PUD, \tmp1, \tmp2
#endif
#if SWAPPER_PGTABLE_LEVELS > 2
	create_table_entry \tbl, \virt, SWAPPER_TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2
#endif
	.endm
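
	/*
	 * For example (illustrative only; assumes 4KB pages with 48-bit
	 * VAs, where SWAPPER_PGTABLE_LEVELS == 3 when section maps are
	 * used): this expands to a PGDIR_SHIFT entry followed by a
	 * SWAPPER_TABLE_SHIFT (PUD-level) entry, leaving tbl pointing at
	 * the table that create_block_map fills with 2MB block entries.
	 */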

/*
 * Macro to populate block entries in the page table for the start..end
 * virtual range (inclusive).
 *
 * Preserves:	tbl, flags
 * Corrupts:	phys, start, end, pstate
 */
	.macro	create_block_map, tbl, flags, phys, start, end
	lsr	\phys, \phys, #SWAPPER_BLOCK_SHIFT
	lsr	\start, \start, #SWAPPER_BLOCK_SHIFT
	and	\start, \start, #PTRS_PER_PTE - 1	// table index
	orr	\phys, \flags, \phys, lsl #SWAPPER_BLOCK_SHIFT	// table entry
	lsr	\end, \end, #SWAPPER_BLOCK_SHIFT
	and	\end, \end, #PTRS_PER_PTE - 1		// table end index
9999:	str	\phys, [\tbl, \start, lsl #3]		// store the entry
	add	\start, \start, #1			// next entry
	add	\phys, \phys, #SWAPPER_BLOCK_SIZE	// next block
	cmp	\start, \end
	b.ls	9999b
	.endm
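
	/*
	 * Worked example (illustrative only; assumes 4KB pages, so
	 * SWAPPER_BLOCK_SHIFT == 21 and SWAPPER_BLOCK_SIZE == 2MB): for
	 * start == 0xffffffc000000000 and end == 0xffffffc000400000, the
	 * indices are (start >> 21) & 511 == 0 and (end >> 21) & 511 == 2,
	 * so three 2MB block descriptors are written at slots 0, 1 and 2,
	 * the output address advancing by 2MB per slot.
	 */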

/*
 * Set up the initial page tables. We only set up the barest amount which is
 * required to get the kernel running. The following sections are required:
 *   - identity mapping to enable the MMU (low address, TTBR0)
 *   - first few MB of the kernel linear mapping to jump to once the MMU has
 *     been enabled
 */
__create_page_tables:
	adrp	x25, idmap_pg_dir
	adrp	x26, swapper_pg_dir
	mov	x28, lr

	/*
	 * Invalidate the idmap and swapper page tables to avoid potential
	 * dirty cache lines being evicted.
	 */
	mov	x0, x25
	add	x1, x26, #SWAPPER_DIR_SIZE
	bl	__inval_cache_range

	/*
	 * Clear the idmap and swapper page tables.
	 */
	mov	x0, x25
	add	x6, x26, #SWAPPER_DIR_SIZE
1:	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	cmp	x0, x6
	b.lo	1b

	mov	x7, SWAPPER_MM_MMUFLAGS

	/*
	 * Create the identity mapping.
	 */
	mov	x0, x25				// idmap_pg_dir
	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)

#ifndef CONFIG_ARM64_VA_BITS_48
#define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
#define EXTRA_PTRS	(1 << (48 - EXTRA_SHIFT))

	/*
	 * If VA_BITS < 48, it may be too small to allow for an ID mapping to be
	 * created that covers system RAM if that is located sufficiently high
	 * in the physical address space. So for the ID map, use an extended
	 * virtual range in that case, by configuring an additional translation
	 * level.
	 * First, we have to verify our assumption that the current value of
	 * VA_BITS was chosen such that all translation levels are fully
	 * utilised, and that lowering T0SZ will always result in an additional
	 * translation level to be configured.
	 */
#if VA_BITS != EXTRA_SHIFT
#error "Mismatch between VA_BITS and page size/number of translation levels"
#endif

	/*
	 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
	 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
	 * this number conveniently equals the number of leading zeroes in
	 * the physical address of __idmap_text_end.
	 */
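	/*
	 * (Worked example, illustrative only: with VA_BITS == 39 the
	 * default T0SZ is 64 - 39 == 25. If __idmap_text_end lies at a
	 * physical address with bit 39 set, clz yields 24 < 25, so the
	 * default range is too small and an extra translation level is
	 * configured with idmap_t0sz == 24, i.e. a 40-bit ID map.)
	 */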
	adrp	x5, __idmap_text_end
	clz	x5, x5
	cmp	x5, TCR_T0SZ(VA_BITS)	// default T0SZ small enough?
	b.ge	1f			// .. then skip additional level

	adr_l	x6, idmap_t0sz
	str	x5, [x6]
	dmb	sy
	dc	ivac, x6		// Invalidate potentially stale cache line

	create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6
1:
#endif

	create_pgd_entry x0, x3, x5, x6
	mov	x5, x3				// __pa(__idmap_text_start)
	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)
	create_block_map x0, x7, x3, x5, x6

	/*
	 * Map the kernel image (starting with PHYS_OFFSET).
	 */
	mov	x0, x26				// swapper_pg_dir
	mov_q	x5, KIMAGE_VADDR + TEXT_OFFSET	// compile time __va(_text)
	add	x5, x5, x23			// add KASLR displacement
	create_pgd_entry x0, x5, x3, x6
	adrp	x6, _end			// runtime __pa(_end)
	adrp	x3, _text			// runtime __pa(_text)
	sub	x6, x6, x3			// _end - _text
	add	x6, x6, x5			// runtime __va(_end)
	create_block_map x0, x7, x3, x5, x6

	/*
	 * Since the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate the idmap and swapper page
	 * tables again to remove any speculatively loaded cache lines.
	 */
	mov	x0, x25
	add	x1, x26, #SWAPPER_DIR_SIZE
	dmb	sy
	bl	__inval_cache_range

	ret	x28
ENDPROC(__create_page_tables)
	.ltorg

/*
 * The following fragment of code is executed with the MMU enabled.
 */
	.set	initial_sp, init_thread_union + THREAD_START_SP
__primary_switched:
	mov	x28, lr				// preserve LR
	adr_l	x8, vectors			// load VBAR_EL1 with virtual
	msr	vbar_el1, x8			// vector table address
	isb

	// Clear BSS
	adr_l	x0, __bss_start
	mov	x1, xzr
	adr_l	x2, __bss_stop
	sub	x2, x2, x0
	bl	__pi_memset
	dsb	ishst				// Make zero page visible to PTW

	adr_l	sp, initial_sp, x4
	mov	x4, sp
	and	x4, x4, #~(THREAD_SIZE - 1)
	msr	sp_el0, x4			// Save thread_info
	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

	ldr_l	x4, kimage_vaddr		// Save the offset between
	sub	x4, x4, x24			// the kernel virtual and
	str_l	x4, kimage_voffset, x5		// physical mappings

	mov	x29, #0
#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
#ifdef CONFIG_RANDOMIZE_BASE
	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
	b.ne	0f
	mov	x0, x21				// pass FDT address in x0
	mov	x1, x23				// pass modulo offset in x1
	bl	kaslr_early_init		// parse FDT for KASLR options
	cbz	x0, 0f				// KASLR disabled? just proceed
	orr	x23, x23, x0			// record KASLR offset
	ret	x28				// we must enable KASLR, return
						// to __enable_mmu()
0:
#endif
	b	start_kernel
ENDPROC(__primary_switched)

/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
	.section ".text","ax"

ENTRY(kimage_vaddr)
	.quad		_text - TEXT_OFFSET

/*
 * If we're fortunate enough to boot at EL2, ensure that the world is
 * sane before dropping to EL1.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x20 if
 * booted in EL1 or EL2 respectively.
 */
ENTRY(el2_setup)
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.ne	1f
	mrs	x0, sctlr_el2
CPU_BE(	orr	x0, x0, #(1 << 25)	)	// Set the EE bit for EL2
CPU_LE(	bic	x0, x0, #(1 << 25)	)	// Clear the EE bit for EL2
	msr	sctlr_el2, x0
	b	2f

1:	mrs	x0, sctlr_el1
CPU_BE(	orr	x0, x0, #(3 << 24)	)	// Set the EE and E0E bits for EL1
CPU_LE(	bic	x0, x0, #(3 << 24)	)	// Clear the EE and E0E bits for EL1
	msr	sctlr_el1, x0
	mov	w20, #BOOT_CPU_MODE_EL1		// This cpu booted in EL1
	isb
	ret

2:
#ifdef CONFIG_ARM64_VHE
	/*
	 * Check for VHE being present. For the rest of the EL2 setup,
	 * x2 being non-zero indicates that we do have VHE, and that the
	 * kernel is intended to run at EL2.
	 */
	mrs	x2, id_aa64mmfr1_el1
	ubfx	x2, x2, #8, #4
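						// (i.e. the VH field,
						// bits [11:8]; non-zero
						// means VHE is implemented)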
#else
	mov	x2, xzr
#endif

	/* Hyp configuration. */
	mov	x0, #HCR_RW			// 64-bit EL1
	cbz	x2, set_hcr
	orr	x0, x0, #HCR_TGE		// Enable Host Extensions
	orr	x0, x0, #HCR_E2H
set_hcr:
	msr	hcr_el2, x0
	isb

	/* Generic timers. */
	mrs	x0, cnthctl_el2
	orr	x0, x0, #3			// Enable EL1 physical timers
	msr	cnthctl_el2, x0
	msr	cntvoff_el2, xzr		// Clear virtual offset

#ifdef CONFIG_ARM_GIC_V3
	/* GICv3 system register access */
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #24, #4
	cmp	x0, #1
	b.ne	3f

	mrs_s	x0, ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
	msr_s	ICC_SRE_EL2, x0
	isb					// Make sure SRE is now set
	mrs_s	x0, ICC_SRE_EL2			// Read SRE back,
	tbz	x0, #0, 3f			// and check that it sticks
	msr_s	ICH_HCR_EL2, xzr		// Reset ICC_HCR_EL2 to defaults

3:
#endif

	/* Populate ID registers. */
	mrs	x0, midr_el1
	mrs	x1, mpidr_el1
	msr	vpidr_el2, x0
	msr	vmpidr_el2, x1

	/* sctlr_el1 */
	mov	x0, #0x0800			// Set/clear RES{1,0} bits
CPU_BE(	movk	x0, #0x33d0, lsl #16	)	// Set EE and E0E on BE systems
CPU_LE(	movk	x0, #0x30d0, lsl #16	)	// Clear EE and E0E on LE systems
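						// (illustrative: the result
						// is 0x30d00800 on LE or
						// 0x33d00800 on BE, i.e. the
						// SCTLR_EL1 RES1 bits, plus
						// EE/E0E on BE)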
	msr	sctlr_el1, x0

	/* Coprocessor traps. */
	mov	x0, #0x33ff
	msr	cptr_el2, x0			// Disable copro. traps to EL2

#ifdef CONFIG_COMPAT
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
#endif

	/* EL2 debug */
	mrs	x0, id_aa64dfr0_el1		// Check ID_AA64DFR0_EL1 PMUVer
	sbfx	x0, x0, #8, #4
	cmp	x0, #1
	b.lt	4f				// Skip if no PMU present
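						// (note: sbfx sign-extends,
						// so an IMPLEMENTATION
						// DEFINED PMUVer of 0xf
						// reads as -1 and is
						// skipped as well)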
	mrs	x0, pmcr_el0			// Disable debug access traps
	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
	msr	mdcr_el2, x0			// all PMU counters from EL1
4:

	/* Stage-2 translation */
	msr	vttbr_el2, xzr

	cbz	x2, install_el2_stub

	mov	w20, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	isb
	ret

install_el2_stub:
	/* Hypervisor stub */
	adrp	x0, __hyp_stub_vectors
	add	x0, x0, #:lo12:__hyp_stub_vectors
	msr	vbar_el2, x0

	/* spsr */
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, x0
	msr	elr_el2, lr
	mov	w20, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	eret
ENDPROC(el2_setup)

/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in x20. See arch/arm64/include/asm/virt.h for more info.
 */
set_cpu_boot_mode_flag:
	adr_l	x1, __boot_cpu_mode
	cmp	w20, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4
1:	str	w20, [x1]			// record the CPU boot mode
	dmb	sy
	dc	ivac, x1			// Invalidate potentially stale cache line
	ret
ENDPROC(set_cpu_boot_mode_flag)

/*
 * We need to find out the CPU boot mode long after boot, so we need to
 * store it in a writable variable.
 *
 * This is not in .bss, because we set it sufficiently early that the boot-time
 * zeroing of .bss would clobber it.
 */
	.pushsection	.data..cacheline_aligned
	.align	L1_CACHE_SHIFT
ENTRY(__boot_cpu_mode)
	.long	BOOT_CPU_MODE_EL2
	.long	BOOT_CPU_MODE_EL1
	.popsection

/*
 * This provides a "holding pen" in which platforms hold all secondary
 * cores until we're ready for them to initialise.
 */
ENTRY(secondary_holding_pen)
	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
	bl	set_cpu_boot_mode_flag
	mrs	x0, mpidr_el1
	mov_q	x1, MPIDR_HWID_BITMASK
	and	x0, x0, x1
	adr_l	x3, secondary_holding_pen_release
pen:	ldr	x4, [x3]
	cmp	x4, x0
	b.eq	secondary_startup
	wfe
	b	pen
ENDPROC(secondary_holding_pen)

	/*
	 * Secondary entry point that jumps straight into the kernel. Only to
	 * be used where CPUs are brought online dynamically by the kernel.
	 */
ENTRY(secondary_entry)
	bl	el2_setup			// Drop to EL1
	bl	set_cpu_boot_mode_flag
	b	secondary_startup
ENDPROC(secondary_entry)

secondary_startup:
	/*
	 * Common entry point for secondary CPUs.
	 */
	adrp	x25, idmap_pg_dir
	adrp	x26, swapper_pg_dir
	bl	__cpu_setup			// initialise processor

	adr_l	x27, __secondary_switch		// address to jump to after enabling the MMU
	b	__enable_mmu
ENDPROC(secondary_startup)

__secondary_switched:
	adr_l	x5, vectors
	msr	vbar_el1, x5
	isb

	adr_l	x0, secondary_data
	ldr	x0, [x0, #CPU_BOOT_STACK]	// get secondary_data.stack
	mov	sp, x0
	and	x0, x0, #~(THREAD_SIZE - 1)
	msr	sp_el0, x0			// save thread_info
	mov	x29, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)
|
|
|
|
|
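A minimal C rendering of the stack/thread_info trick above (a sketch,
assuming the arm64 convention of this era that the initial stack and
struct thread_info share one THREAD_SIZE-aligned block; the function
name is illustrative):

	/* Masking the stack pointer down to a THREAD_SIZE boundary yields
	 * the thread_info pointer; the asm above caches it in sp_el0. */
	static inline struct thread_info *thread_info_from_sp(unsigned long sp)
	{
		return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
	}
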
/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 *
 * update_early_cpu_boot_status status, tmp1, tmp2
 *  - Corrupts tmp1, tmp2
 *  - Writes 'status' to __early_cpu_boot_status and makes sure
 *    it is committed to memory.
 */
	.macro	update_early_cpu_boot_status status, tmp1, tmp2
	mov	\tmp2, #\status
	str_l	\tmp2, __early_cpu_boot_status, \tmp1
	dmb	sy
	dc	ivac, \tmp1			// Invalidate potentially stale cache line
	.endm

	.pushsection	.data..cacheline_aligned
	.align	L1_CACHE_SHIFT
ENTRY(__early_cpu_boot_status)
	.long	0
	.popsection
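In C-ish terms the macro above amounts to the following (illustrative;
dmb() is the usual arm64 barrier macro, while invalidate_dcache_line()
is a hypothetical helper standing in for the "dc ivac" instruction):

	static void set_early_boot_status(long status)
	{
		__early_cpu_boot_status = status;	/* store goes straight to memory: MMU is off */
		dmb(sy);				/* complete the store first */
		/* drop any stale cached copy, so observers running with
		 * caches enabled read the value just written */
		invalidate_dcache_line(&__early_cpu_boot_status);
	}
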
/*
 * Enable the MMU.
 *
 *  x0  = SCTLR_EL1 value for turning on the MMU.
 *  x27 = *virtual* address to jump to upon completion
 *
 * Other registers depend on the function called upon completion.
 *
 * Checks if the selected granule size is supported by the CPU.
 * If it isn't, park the CPU.
 */
	.section	".idmap.text", "ax"
ENTRY(__enable_mmu)
	mrs	x22, sctlr_el1			// preserve old SCTLR_EL1 value
	mrs	x1, ID_AA64MMFR0_EL1
	ubfx	x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
	b.ne	__no_granule_support
	update_early_cpu_boot_status 0, x1, x2
	msr	ttbr0_el1, x25			// load TTBR0
	msr	ttbr1_el1, x26			// load TTBR1
	isb
	msr	sctlr_el1, x0
	isb
	/*
	 * Invalidate the local I-cache so that any instructions fetched
	 * speculatively from the PoC are discarded, since they may have
	 * been dynamically patched at the PoU.
	 */
	ic	iallu
	dsb	nsh
	isb
arm64: add support for kernel ASLR
This adds support for KASLR, implemented based on entropy provided by
the bootloader in the /chosen/kaslr-seed DT property. Depending on the size
of the address space (VA_BITS) and the page size, the entropy in the
virtual displacement is up to 13 bits (16k/2 levels) and up to 25 bits (all
4 levels), with the sidenote that displacements that result in the kernel
image straddling a 1GB/32MB/512MB alignment boundary (for 4KB/16KB/64KB
granule kernels, respectively) are not allowed, and will be rounded up to
an acceptable value.
If CONFIG_RANDOMIZE_MODULE_REGION_FULL is enabled, the module region is
randomized independently from the core kernel. This makes it less likely
that the location of core kernel data structures can be determined by an
adversary, but causes all function calls from modules into the core kernel
to be resolved via entries in the module PLTs.
If CONFIG_RANDOMIZE_MODULE_REGION_FULL is not enabled, the module region is
randomized by choosing a page aligned 128 MB region inside the interval
[_etext - 128 MB, _stext + 128 MB). This gives between 10 and 14 bits of
entropy (depending on page size), independently of the kernel randomization,
but still guarantees that modules are within the range of relative branch
and jump instructions (with the caveat that, since the module region is
shared with other uses of the vmalloc area, modules may need to be loaded
further away if the module region is exhausted).
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
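A sketch of the non-FULL placement described above (the function name
and seed parameter are illustrative, SZ_128M and PAGE_MASK are the
usual kernel constants, and bounds/edge cases are simplified):

	/*
	 * Pick a page-aligned 128 MB module region inside
	 * [_etext - 128 MB, _stext + 128 MB), so that modules stay
	 * within +/-128 MB branch range of the kernel text.
	 */
	u64 pick_module_base(u64 stext, u64 etext, u64 seed)
	{
		u64 range = SZ_128M - (etext - stext);	/* legal slide for the region base */
		u64 offset = (seed % range) & PAGE_MASK;	/* page-aligned */

		return (etext - SZ_128M) + offset;
	}
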
#ifdef CONFIG_RANDOMIZE_BASE
	mov	x19, x0				// preserve new SCTLR_EL1 value
	blr	x27

	/*
	 * If we return here, we have a KASLR displacement in x23 which we need
	 * to take into account by discarding the current kernel mapping and
	 * creating a new one.
	 */
	msr	sctlr_el1, x22			// disable the MMU
	isb
	bl	__create_page_tables		// recreate kernel mapping

	msr	sctlr_el1, x19			// re-enable the MMU
	isb
	ic	iallu				// flush instructions fetched
	dsb	nsh				// via old mapping
	isb
#endif
	br	x27
ENDPROC(__enable_mmu)
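
A C-level sketch of the granule check performed on entry to
__enable_mmu above (illustrative only; read_sysreg() and the
ID_AA64MMFR0_* constants are the usual arm64 definitions, the function
name is an assumption):

	/* Extract the 4-bit TGRAN field for the configured granule and
	 * compare it against the "supported" value, mirroring the
	 * ubfx/cmp/b.ne sequence above. */
	static bool granule_supported(void)
	{
		u64 mmfr0 = read_sysreg(id_aa64mmfr0_el1);
		u32 tgran = (mmfr0 >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;

		return tgran == ID_AA64MMFR0_TGRAN_SUPPORTED;
	}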

__no_granule_support:
	/* Indicate that this CPU can't boot and is stuck in the kernel */
	update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x1, x2
1:
	wfe
	wfi
	b	1b
ENDPROC(__no_granule_support)

__primary_switch:
#ifdef CONFIG_RELOCATABLE
	/*
	 * Iterate over each entry in the relocation table, and apply the
	 * relocations in place.
	 */
	ldr	w8, =__dynsym_offset		// offset to symbol table
	ldr	w9, =__rela_offset		// offset to reloc table
	ldr	w10, =__rela_size		// size of reloc table

	mov_q	x11, KIMAGE_VADDR		// default virtual offset
	add	x11, x11, x23			// actual virtual offset
	add	x8, x8, x11			// __va(.dynsym)
	add	x9, x9, x11			// __va(.rela)
	add	x10, x9, x10			// __va(.rela) + sizeof(.rela)

0:	cmp	x9, x10
	b.hs	2f
	ldp	x11, x12, [x9], #24
	ldr	x13, [x9, #-8]
	cmp	w12, #R_AARCH64_RELATIVE
	b.ne	1f
	add	x13, x13, x23			// relocate
	str	x13, [x11, x23]
	b	0b

1:	cmp	w12, #R_AARCH64_ABS64
	b.ne	0b
	add	x12, x12, x12, lsl #1		// symtab offset: 24x top word
	add	x12, x8, x12, lsr #(32 - 3)	// ... shifted into bottom word
	ldrsh	w14, [x12, #6]			// Elf64_Sym::st_shndx
	ldr	x15, [x12, #8]			// Elf64_Sym::st_value
	cmp	w14, #-0xf			// SHN_ABS (0xfff1) ?
	add	x14, x15, x23			// relocate
	csel	x15, x14, x15, ne
	add	x15, x13, x15
	str	x15, [x11, x23]
	b	0b

2:
#endif
	ldr	x8, =__primary_switched
	br	x8
ENDPROC(__primary_switch)
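
For reference, the relocation loop above behaves like this C sketch
(illustrative; the Elf64 layouts are abbreviated, u32/u64/s64 are the
usual kernel fixed-width types, and the function name is an
assumption):

	/* One 24-byte Elf64_Rela entry: where to patch, how, and the addend. */
	typedef struct {
		u64 r_offset;		/* link-time VA of the patched word */
		u64 r_info;		/* type in the low word, symbol index in the high word */
		s64 r_addend;
	} Elf64_Rela;

	static void apply_relocations(Elf64_Rela *rela, Elf64_Rela *end,
				      Elf64_Sym *dynsym, u64 offset)
	{
		for (; rela < end; rela++) {
			u64 *place = (u64 *)(rela->r_offset + offset);

			switch ((u32)rela->r_info) {
			case R_AARCH64_RELATIVE:
				/* value is simply addend + KASLR displacement */
				*place = rela->r_addend + offset;
				break;
			case R_AARCH64_ABS64: {
				Elf64_Sym *sym = dynsym + (rela->r_info >> 32);
				u64 val = sym->st_value;

				/* only non-absolute symbols get displaced */
				if (sym->st_shndx != SHN_ABS)
					val += offset;
				*place = rela->r_addend + val;
				break;
			}
			}
		}
	}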

__secondary_switch:
	ldr	x8, =__secondary_switched
	br	x8
ENDPROC(__secondary_switch)