/*
 * Based on arch/arm/include/asm/memory.h
 *
 * Copyright (C) 2000-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Note: this file should not be included by non-asm/.h files
 */
#ifndef __ASM_MEMORY_H
#define __ASM_MEMORY_H

#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
#include <asm/bug.h>
#include <asm/page-def.h>
#include <asm/sizes.h>

/*
 * Allow constants defined here to be used from assembly code by
 * appending the UL suffix only during actual C code compilation.
 */
#define UL(x)		_AC(x, UL)

/*
 * Size of the PCI I/O space. This must remain a power of two so that
 * IO_SPACE_LIMIT acts as a mask for the low bits of I/O addresses.
 */
#define PCI_IO_SIZE	SZ_16M

/*
 * Log2 of the upper bound of the size of a struct page. Used for sizing
 * the vmemmap region only, does not affect actual memory footprint.
 * We don't use sizeof(struct page) directly since taking its size here
 * requires its definition to be available at this point in the inclusion
 * chain, and it may not be a power of 2 in the first place.
 */
#define STRUCT_PAGE_MAX_SHIFT	6

/*
 * VMEMMAP_SIZE - allows the whole linear region to be covered by
 *                a struct page array
 */
#define VMEMMAP_SIZE	(UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT))
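
/*
 * Worked example (illustrative only): with VA_BITS == 48 and 4K pages
 * (PAGE_SHIFT == 12), the linear region spans 2^47 bytes, i.e. 2^35
 * pages. At up to 2^STRUCT_PAGE_MAX_SHIFT == 64 bytes per struct page,
 * VMEMMAP_SIZE = UL(1) << (48 - 12 - 1 + 6) = 2^41 bytes = 2 TiB.
 */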

/*
 * VA_BITS - the maximum number of bits for virtual addresses.
 * VA_START - the first kernel virtual address.
 * PAGE_OFFSET - the virtual address of the start of the linear map; the
 *		 linear map occupies the top half (2^(VA_BITS - 1) bytes)
 *		 of the kernel VA space.
 * KIMAGE_VADDR - the virtual address of the start of the kernel image.
 */
#define VA_BITS			(CONFIG_ARM64_VA_BITS)
#define VA_START		(UL(0xffffffffffffffff) - \
					(UL(1) << VA_BITS) + 1)
#define PAGE_OFFSET		(UL(0xffffffffffffffff) - \
					(UL(1) << (VA_BITS - 1)) + 1)
#define KIMAGE_VADDR		(MODULES_END)
#define MODULES_END		(MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR		(VA_START + KASAN_SHADOW_SIZE)
#define MODULES_VSIZE (SZ_128M)
#define VMEMMAP_START		(PAGE_OFFSET - VMEMMAP_SIZE)
#define PCI_IO_END		(VMEMMAP_START - SZ_2M)
#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP		(PCI_IO_START - SZ_2M)
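
/*
 * Illustrative layout (a sketch, assuming VA_BITS == 48, 4K pages and
 * KASAN disabled; actual values depend on configuration):
 *   VA_START      = 0xffff000000000000 (first kernel VA)
 *   MODULES_VADDR = VA_START, MODULES_END = MODULES_VADDR + 128M
 *   KIMAGE_VADDR  = MODULES_END
 *   PAGE_OFFSET   = 0xffff800000000000 (linear map covers the top half)
 *   VMEMMAP_START = PAGE_OFFSET - 2T, with the PCI I/O space and the
 *   fixmap packed below it, each separated by 2M of guard padding.
 */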

#define KERNEL_START		_text
#define KERNEL_END		_end

/*
 * KASAN requires 1/8th of the kernel virtual address space for the shadow
 * region. KASAN can bloat the stack significantly, so double the (minimum)
 * stack size when KASAN is in use.
 */
#ifdef CONFIG_KASAN
#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_SIZE	(UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_THREAD_SHIFT	1
#else
#define KASAN_SHADOW_SIZE	(0)
#define KASAN_THREAD_SHIFT	0
#endif

#define MIN_THREAD_SHIFT	(14 + KASAN_THREAD_SHIFT)
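
/*
 * Example (illustrative): MIN_THREAD_SHIFT is 14 (16K stacks) by
 * default, or 15 (32K stacks) when KASAN's extra redzones inflate
 * stack usage.
 */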

/*
 * VMAP'd stacks are allocated at page granularity, so we must ensure that
 * such stacks are a multiple of page size.
 */
#if defined(CONFIG_VMAP_STACK) && (MIN_THREAD_SHIFT < PAGE_SHIFT)
#define THREAD_SHIFT		PAGE_SHIFT
#else
#define THREAD_SHIFT		MIN_THREAD_SHIFT
#endif

#if THREAD_SHIFT >= PAGE_SHIFT
#define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
#endif

#define THREAD_SIZE		(UL(1) << THREAD_SHIFT)

/*
 * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by
 * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
 * assembly.
 */
#ifdef CONFIG_VMAP_STACK
#define THREAD_ALIGN		(2 * THREAD_SIZE)
#else
#define THREAD_ALIGN		THREAD_SIZE
#endif
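
/*
 * Example (per the overflow-detection design): a 16K stack aligned to
 * 32K ensures that bit 14 of a valid SP is zero; an overflow (or
 * underflow) of less than the stack size flips that bit, so the entry
 * code can detect it with a single bit test.
 */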

#define IRQ_STACK_SIZE		THREAD_SIZE
#define OVERFLOW_STACK_SIZE SZ_4K

/*
 * Alignment of kernel segments (e.g. .text, .data).
 */
#if defined(CONFIG_DEBUG_ALIGN_RODATA)
/*
 *  4 KB granule:   1 level 2 entry
 * 16 KB granule: 128 level 3 entries, with contiguous bit
 * 64 KB granule:  32 level 3 entries, with contiguous bit
 */
#define SEGMENT_ALIGN		SZ_2M
#else
/*
 *  4 KB granule:  16 level 3 entries, with contiguous bit
 * 16 KB granule:   4 level 3 entries, without contiguous bit
 * 64 KB granule:   1 level 3 entry
 */
#define SEGMENT_ALIGN		SZ_64K
#endif

/*
 * Memory types available.
 */
#define MT_DEVICE_nGnRnE	0
#define MT_DEVICE_nGnRE		1
#define MT_DEVICE_GRE		2
#define MT_NORMAL_NC		3
#define MT_NORMAL		4
#define MT_NORMAL_WT		5
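
/*
 * Descriptive note (assumption about usage): these values are indices
 * into the MAIR_EL1 attribute fields; the attribute encodings
 * themselves are programmed into MAIR_EL1 at MMU setup.
 */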

/*
 * Memory types for Stage-2 translation
 */
#define MT_S2_NORMAL		0xf
#define MT_S2_DEVICE_nGnRE	0x1

#ifdef CONFIG_ARM64_4K_PAGES
#define IOREMAP_MAX_ORDER	(PUD_SHIFT)
#else
#define IOREMAP_MAX_ORDER	(PMD_SHIFT)
#endif
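
/*
 * Descriptive note (informal): IOREMAP_MAX_ORDER caps the alignment
 * order of ioremap() areas, bounding the largest block mapping used:
 * PUD-sized blocks with 4K pages, PMD-sized blocks otherwise.
 */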

#ifdef CONFIG_BLK_DEV_INITRD
#define __early_init_dt_declare_initrd(__start, __end)			\
	do {								\
		initrd_start = (__start);				\
		initrd_end = (__end);					\
	} while (0)
#endif

#ifndef __ASSEMBLY__

#include <linux/bitops.h>
#include <linux/mmdebug.h>

extern s64			memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
#define PHYS_OFFSET		({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
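
/*
 * A sketch of the intent (assumption): memstart_addr starts out as -1
 * (odd) until memory discovery runs, so the VM_BUG_ON above catches
 * uses of PHYS_OFFSET before the linear map base is known.
 */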

/* the virtual base of the kernel image (minus TEXT_OFFSET) */
extern u64			kimage_vaddr;

/* the offset between the kernel virtual and physical mappings */
extern u64			kimage_voffset;

static inline unsigned long kaslr_offset(void)
{
	return kimage_vaddr - KIMAGE_VADDR;
}
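
/*
 * Illustrative reading: kimage_vaddr is the (possibly randomized)
 * virtual base of the kernel image, so kaslr_offset() is the
 * displacement applied at boot; it is 0 when no randomization occurred.
 */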

/*
 * Allow all memory at the discovery stage. We will clip it later.
 */
#define MIN_MEMBLOCK_ADDR	0
#define MAX_MEMBLOCK_ADDR	U64_MAX

/*
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
 *
 * This is the PFN of the first RAM page in the kernel
 * direct-mapped view.  We assume this is the first page
 * of RAM in the mem_map as well.
 */
#define PHYS_PFN_OFFSET		(PHYS_OFFSET >> PAGE_SHIFT)

/*
 * Physical vs virtual RAM address space conversion.  These are
 * private definitions which should NOT be used outside memory.h
 * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
 */

/*
 * The linear kernel range starts in the middle of the virtual address
 * space. Testing the top bit for the start of the region is a
 * sufficient check.
 */
#define __is_lm_address(addr)	(!!((addr) & BIT(VA_BITS - 1)))

#define __lm_to_phys(addr)	(((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
#define __kimg_to_phys(addr)	((addr) - kimage_voffset)

#define __virt_to_phys_nodebug(x) ({					\
	phys_addr_t __x = (phys_addr_t)(x);				\
	__is_lm_address(__x) ? __lm_to_phys(__x) :			\
			       __kimg_to_phys(__x);			\
})

#define __pa_symbol_nodebug(x)	__kimg_to_phys((phys_addr_t)(x))
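
/*
 * Sketch of the distinction (illustrative addresses, assuming
 * VA_BITS == 48): a linear-map address such as 0xffff800000100000 has
 * bit 47 set, so it is translated by masking off PAGE_OFFSET and adding
 * PHYS_OFFSET; a kernel-image address such as 0xffff000008080000 has
 * bit 47 clear, so it is translated by subtracting kimage_voffset.
 */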

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x)	__virt_to_phys_nodebug(x)
#define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
#endif

#define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
#define __phys_to_kimg(x)	((unsigned long)((x) + kimage_voffset))

/*
 * Convert a page to/from a physical address
 */
#define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))

/*
 * Note: Drivers should NOT use these.  They are the wrong
 * translation for DMA addresses.  Use the driver
 * DMA support - see dma-mapping.h.
 */
#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
	return __virt_to_phys((unsigned long)(x));
}

#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(phys_addr_t x)
{
	return (void *)(__phys_to_virt(x));
}

/*
 * Drivers should NOT use these either.
 */
#define __pa(x)			__virt_to_phys((unsigned long)(x))
#define __pa_symbol(x)		__phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __pa_nodebug(x)		__virt_to_phys_nodebug((unsigned long)(x))
#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define virt_to_pfn(x)		__phys_to_pfn(__virt_to_phys((unsigned long)(x)))
#define sym_to_pfn(x)		__phys_to_pfn(__pa_symbol(x))
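
/*
 * Usage sketch (illustrative): for an address in the linear map,
 * __va(__pa(x)) == x. For kernel-image symbols, prefer __pa_symbol()
 * so that CONFIG_DEBUG_VIRTUAL builds can verify the address category.
 */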

/*
 *  virt_to_page(k)	convert a _valid_ virtual address to struct page *
 *  virt_addr_valid(k)	indicates whether a virtual address is valid
 */
#define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)

#ifndef CONFIG_SPARSEMEM_VMEMMAP
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define _virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#else
#define __virt_to_pgoff(kaddr)	(((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
#define __page_to_voff(kaddr)	(((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))

#define page_to_virt(page)	((void *)((__page_to_voff(page)) | PAGE_OFFSET))
#define virt_to_page(vaddr)	((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))

#define _virt_addr_valid(kaddr)	pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
					   + PHYS_OFFSET) >> PAGE_SHIFT)
#endif
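
/*
 * Sketch of the vmemmap arithmetic above (illustrative): masking a
 * linear-map address with ~PAGE_OFFSET yields its byte offset into the
 * linear region; dividing by PAGE_SIZE and scaling by
 * sizeof(struct page) gives the offset of its struct page within the
 * vmemmap, which is rebased by OR-ing in VMEMMAP_START. page_to_virt()
 * applies the inverse transformation.
 */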
#endif

#define _virt_addr_is_linear(kaddr)	(((u64)(kaddr)) >= PAGE_OFFSET)
#define virt_addr_valid(kaddr)		(_virt_addr_is_linear(kaddr) && \
					   _virt_addr_valid(kaddr))

#include <asm-generic/memory_model.h>

#endif
|