2012-03-05 11:49:27 +00:00
|
|
|
/*
|
|
|
|
* Based on arch/arm/include/asm/memory.h
|
|
|
|
*
|
|
|
|
* Copyright (C) 2000-2002 Russell King
|
|
|
|
* Copyright (C) 2012 ARM Ltd.
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
*
|
|
|
|
* Note: this file should not be included by non-asm/.h files
|
|
|
|
*/
|
|
|
|
#ifndef __ASM_MEMORY_H
|
|
|
|
#define __ASM_MEMORY_H
|
|
|
|
|
|
|
|
#include <linux/compiler.h>
|
|
|
|
#include <linux/const.h>
|
|
|
|
#include <linux/types.h>
|
2016-02-16 12:52:42 +00:00
|
|
|
#include <asm/bug.h>
|
2012-03-05 11:49:27 +00:00
|
|
|
#include <asm/sizes.h>
|
|
|
|
|
|
|
|
/*
 * Constants defined in this file are shared with assembly code: UL()
 * appends the "UL" suffix (via _AC) only during actual C compilation,
 * so the same definitions assemble cleanly too.
 */
#define UL(x) _AC(x, UL)
|
|
|
|
|
arm64: Fix overlapping VA allocations
PCI IO space was intended to be 16MiB, at 32MiB below MODULES_VADDR, but
commit d1e6dc91b532d3d3 ("arm64: Add architectural support for PCI")
extended this to cover the full 32MiB. The final 8KiB of this 32MiB is
also allocated for the fixmap, allowing for potential clashes between
the two.
This change was masked by assumptions in mem_init and the page table
dumping code, which assumed the I/O space to be 16MiB long through
separate hard-coded definitions.
This patch changes the definition of the PCI I/O space allocation to
live in asm/memory.h, along with the other VA space allocations. As the
fixmap allocation depends on the number of fixmap entries, this is moved
below the PCI I/O space allocation. Both the fixmap and PCI I/O space
are guarded with 2MB of padding. Sites assuming the I/O space was 16MiB
are moved over to use the new PCI_IO_{START,END} definitions, which will
keep in sync with the size of the IO space (now restored to 16MiB).
As a useful side effect, the use of the new PCI_IO_{START,END}
definitions prevents a build issue in the dumping code due to a (now
redundant) missing include of io.h for PCI_IOBASE.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Laura Abbott <lauraa@codeaurora.org>
Cc: Liviu Dudau <liviu.dudau@arm.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: Will Deacon <will.deacon@arm.com>
[catalin.marinas@arm.com: reorder FIXADDR and PCI_IO address_markers_idx enum]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2015-01-22 18:20:35 +00:00
|
|
|
/*
 * Size of the PCI I/O space. Keep this a power of two: IO_SPACE_LIMIT
 * relies on it to act as a mask for the low bits of I/O addresses.
 */
#define PCI_IO_SIZE		SZ_16M
|
|
|
|
|
2016-03-30 14:46:00 +00:00
|
|
|
/*
 * Log2 of an upper bound on sizeof(struct page). Used only for sizing
 * the vmemmap region; it does not affect the actual memory footprint.
 * sizeof(struct page) is not used directly because struct page need not
 * be defined at this point in the include chain, and its size may not
 * be a power of two in the first place.
 */
#define STRUCT_PAGE_MAX_SHIFT	6
|
|
|
|
|
|
|
|
/*
 * VMEMMAP_SIZE - virtual space reserved for a struct page array large
 * enough to cover the whole linear region (hence VA_BITS - 1: the
 * linear map spans half of the kernel VA space).
 */
#define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT))
|
|
|
|
|
2012-03-05 11:49:27 +00:00
|
|
|
/*
 * Kernel virtual address space layout:
 *
 * VA_BITS - the maximum number of bits for virtual addresses.
 * VA_START - the first kernel virtual address.
 * PAGE_OFFSET - the virtual address of the start of the linear map (top
 *		 (VA_BITS - 1))
 * KIMAGE_VADDR - the virtual address of the start of the kernel image
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
 */
#define VA_BITS			(CONFIG_ARM64_VA_BITS)
#define VA_START		(UL(0xffffffffffffffff) << VA_BITS)
#define PAGE_OFFSET		(UL(0xffffffffffffffff) << (VA_BITS - 1))
/* The kernel image is linked immediately above the module area. */
#define KIMAGE_VADDR		(MODULES_END)
#define MODULES_END		(MODULES_VADDR + MODULES_VSIZE)
/* Modules start above the KASAN shadow (KASAN_SHADOW_SIZE is 0 if off). */
#define MODULES_VADDR		(VA_START + KASAN_SHADOW_SIZE)
|
arm64: add support for kernel ASLR
This adds support for KASLR, implemented based on entropy provided by
the bootloader in the /chosen/kaslr-seed DT property. Depending on the size
of the address space (VA_BITS) and the page size, the entropy in the
virtual displacement is up to 13 bits (16k/2 levels) and up to 25 bits (all
4 levels), with the sidenote that displacements that result in the kernel
image straddling a 1GB/32MB/512MB alignment boundary (for 4KB/16KB/64KB
granule kernels, respectively) are not allowed, and will be rounded up to
an acceptable value.
If CONFIG_RANDOMIZE_MODULE_REGION_FULL is enabled, the module region is
randomized independently from the core kernel. This makes it less likely
that the location of core kernel data structures can be determined by an
adversary, but causes all function calls from modules into the core kernel
to be resolved via entries in the module PLTs.
If CONFIG_RANDOMIZE_MODULE_REGION_FULL is not enabled, the module region is
randomized by choosing a page aligned 128 MB region inside the interval
[_etext - 128 MB, _stext + 128 MB). This gives between 10 and 14 bits of
entropy (depending on page size), independently of the kernel randomization,
but still guarantees that modules are within the range of relative branch
and jump instructions (with the caveat that, since the module region is
shared with other uses of the vmalloc area, modules may need to be loaded
further away if the module region is exhausted)
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2016-01-26 13:12:01 +00:00
|
|
|
#define MODULES_VSIZE		(SZ_128M)
/* The vmemmap struct page array sits directly below the linear map. */
#define VMEMMAP_START		(PAGE_OFFSET - VMEMMAP_SIZE)
/* PCI I/O space ends below vmemmap, separated by a 2MB guard band. */
#define PCI_IO_END		(VMEMMAP_START - SZ_2M)
|
arm64: Fix overlapping VA allocations
PCI IO space was intended to be 16MiB, at 32MiB below MODULES_VADDR, but
commit d1e6dc91b532d3d3 ("arm64: Add architectural support for PCI")
extended this to cover the full 32MiB. The final 8KiB of this 32MiB is
also allocated for the fixmap, allowing for potential clashes between
the two.
This change was masked by assumptions in mem_init and the page table
dumping code, which assumed the I/O space to be 16MiB long through
separate hard-coded definitions.
This patch changes the definition of the PCI I/O space allocation to
live in asm/memory.h, along with the other VA space allocations. As the
fixmap allocation depends on the number of fixmap entries, this is moved
below the PCI I/O space allocation. Both the fixmap and PCI I/O space
are guarded with 2MB of padding. Sites assuming the I/O space was 16MiB
are moved over to use the new PCI_IO_{START,END} definitions, which will
keep in sync with the size of the IO space (now restored to 16MiB).
As a useful side effect, the use of the new PCI_IO_{START,END}
definitions prevents a build issue in the dumping code due to a (now
redundant) missing include of io.h for PCI_IOBASE.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Laura Abbott <lauraa@codeaurora.org>
Cc: Liviu Dudau <liviu.dudau@arm.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: Will Deacon <will.deacon@arm.com>
[catalin.marinas@arm.com: reorder FIXADDR and PCI_IO address_markers_idx enum]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2015-01-22 18:20:35 +00:00
|
|
|
#define PCI_IO_START		(PCI_IO_END - PCI_IO_SIZE)
/* The fixmap ends below the PCI I/O space, again with a 2MB guard band. */
#define FIXADDR_TOP		(PCI_IO_START - SZ_2M)
|
2012-03-05 11:49:27 +00:00
|
|
|
/* Maximum user address space span for a native 64-bit task. */
#define TASK_SIZE_64		(UL(1) << VA_BITS)

#ifdef CONFIG_COMPAT
/* Compat (AArch32) tasks are limited to a 4GB address space. */
#define TASK_SIZE_32		UL(0x100000000)
/* Pick the limit based on the task's TIF_32BIT flag. */
#define TASK_SIZE		(test_thread_flag(TIF_32BIT) ? \
				TASK_SIZE_32 : TASK_SIZE_64)
#define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_32BIT) ? \
				TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE		TASK_SIZE_64
#endif /* CONFIG_COMPAT */

/* mmap allocations begin a quarter of the way into the task VA space. */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 4))
|
|
|
|
|
2016-04-27 16:47:09 +00:00
|
|
|
/* Linker-provided bounds of the kernel image. */
#define KERNEL_START		_text
#define KERNEL_END		_end
|
|
|
|
|
2016-02-16 12:52:40 +00:00
|
|
|
/*
 * Size of the KASAN shadow region: 1/8th of the entire kernel virtual
 * address space (zero when KASAN is disabled, so dependent layout
 * macros need no conditionals).
 */
#ifdef CONFIG_KASAN
#define KASAN_SHADOW_SIZE	(UL(1) << (VA_BITS - 3))
#else
#define KASAN_SHADOW_SIZE	(0)
#endif
|
|
|
|
|
2012-03-05 11:49:27 +00:00
|
|
|
/*
 * Physical vs virtual RAM address space conversion. These are
 * private definitions which should NOT be used outside memory.h
 * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
 */
/*
 * An address with bit (VA_BITS - 1) set lies in the linear map and is
 * translated by masking off PAGE_OFFSET and adding PHYS_OFFSET;
 * otherwise it is treated as a kernel-image address and translated via
 * kimage_voffset.
 */
#define __virt_to_phys(x) ({						\
	phys_addr_t __x = (phys_addr_t)(x);				\
	__x & BIT(VA_BITS - 1) ? (__x & ~PAGE_OFFSET) + PHYS_OFFSET :	\
				 (__x - kimage_voffset); })

/* Physical address to its linear-map virtual alias. */
#define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
/* Physical address to its kernel-image virtual alias. */
#define __phys_to_kimg(x)	((unsigned long)((x) + kimage_voffset))
|
2012-03-05 11:49:27 +00:00
|
|
|
|
|
|
|
/*
 * Convert a struct page to/from a physical address, going through the
 * page frame number in both directions.
 */
#define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))
|
|
|
|
|
|
|
|
/*
 * Memory types available.
 */
#define MT_DEVICE_nGnRnE	0
#define MT_DEVICE_nGnRE		1
#define MT_DEVICE_GRE		2
#define MT_NORMAL_NC		3
#define MT_NORMAL		4
#define MT_NORMAL_WT		5

/*
 * Memory types for Stage-2 translation
 */
#define MT_S2_NORMAL		0xf
#define MT_S2_DEVICE_nGnRE	0x1
|
|
|
|
|
2016-02-16 12:52:35 +00:00
|
|
|
/*
 * Largest mapping order ioremap may use: PUD-sized with 4K pages,
 * PMD-sized with the other granules.
 */
#ifdef CONFIG_ARM64_4K_PAGES
#define IOREMAP_MAX_ORDER	(PUD_SHIFT)
#else
#define IOREMAP_MAX_ORDER	(PMD_SHIFT)
#endif
|
|
|
|
|
2016-02-16 12:52:41 +00:00
|
|
|
#ifdef CONFIG_BLK_DEV_INITRD
/* Record the initrd bounds handed over by early device-tree parsing. */
#define __early_init_dt_declare_initrd(__start, __end)			\
	do {								\
		initrd_start = (__start);				\
		initrd_end = (__end);					\
	} while (0)
#endif
|
|
|
|
|
2012-03-05 11:49:27 +00:00
|
|
|
#ifndef __ASSEMBLY__
|
|
|
|
|
2016-02-22 17:46:04 +00:00
|
|
|
#include <linux/bitops.h>
|
2016-02-22 17:46:03 +00:00
|
|
|
#include <linux/mmdebug.h>
|
|
|
|
|
2016-02-26 16:57:14 +00:00
|
|
|
/* Physical address of the start of memory; see PHYS_OFFSET below. */
extern s64 memstart_addr;
/*
 * PHYS_OFFSET - the physical address of the start of memory.
 * The VM_BUG_ON catches use of an odd (i.e. invalid) memstart_addr —
 * NOTE(review): presumably a not-yet-initialised sentinel; confirm
 * against the code that assigns memstart_addr.
 */
#define PHYS_OFFSET		({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
|
2016-02-16 12:52:42 +00:00
|
|
|
|
arm64: add support for kernel ASLR
This adds support for KASLR, implemented based on entropy provided by
the bootloader in the /chosen/kaslr-seed DT property. Depending on the size
of the address space (VA_BITS) and the page size, the entropy in the
virtual displacement is up to 13 bits (16k/2 levels) and up to 25 bits (all
4 levels), with the sidenote that displacements that result in the kernel
image straddling a 1GB/32MB/512MB alignment boundary (for 4KB/16KB/64KB
granule kernels, respectively) are not allowed, and will be rounded up to
an acceptable value.
If CONFIG_RANDOMIZE_MODULE_REGION_FULL is enabled, the module region is
randomized independently from the core kernel. This makes it less likely
that the location of core kernel data structures can be determined by an
adversary, but causes all function calls from modules into the core kernel
to be resolved via entries in the module PLTs.
If CONFIG_RANDOMIZE_MODULE_REGION_FULL is not enabled, the module region is
randomized by choosing a page aligned 128 MB region inside the interval
[_etext - 128 MB, _stext + 128 MB). This gives between 10 and 14 bits of
entropy (depending on page size), independently of the kernel randomization,
but still guarantees that modules are within the range of relative branch
and jump instructions (with the caveat that, since the module region is
shared with other uses of the vmalloc area, modules may need to be loaded
further away if the module region is exhausted)
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2016-01-26 13:12:01 +00:00
|
|
|
/* the virtual base of the kernel image (minus TEXT_OFFSET) */
|
|
|
|
extern u64 kimage_vaddr;
|
|
|
|
|
2016-02-16 12:52:42 +00:00
|
|
|
/* the offset between the kernel virtual and physical mappings */
|
|
|
|
extern u64 kimage_voffset;
|
2012-03-05 11:49:27 +00:00
|
|
|
|
2016-12-20 00:23:06 +00:00
|
|
|
static inline unsigned long kaslr_offset(void)
|
|
|
|
{
|
|
|
|
return kimage_vaddr - KIMAGE_VADDR;
|
|
|
|
}
|
|
|
|
|
2015-08-18 09:34:42 +00:00
|
|
|
/*
 * Accept the full physical address range while memory is being
 * discovered; the usable range is clipped later.
 */
#define MIN_MEMBLOCK_ADDR	0
#define MAX_MEMBLOCK_ADDR	U64_MAX
|
2015-08-18 09:34:42 +00:00
|
|
|
|
2012-03-05 11:49:27 +00:00
|
|
|
/*
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
 *
 * PHYS_PFN_OFFSET is the PFN of the first RAM page in the kernel
 * direct-mapped view, assumed to also be the first page of RAM in
 * the mem_map.
 */
#define PHYS_PFN_OFFSET	(PHYS_OFFSET >> PAGE_SHIFT)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Note: Drivers should NOT use these. They are the wrong
|
|
|
|
* translation for translating DMA addresses. Use the driver
|
|
|
|
* DMA support - see dma-mapping.h.
|
|
|
|
*/
|
2014-07-28 15:25:48 +00:00
|
|
|
#define virt_to_phys virt_to_phys
|
2012-03-05 11:49:27 +00:00
|
|
|
static inline phys_addr_t virt_to_phys(const volatile void *x)
|
|
|
|
{
|
|
|
|
return __virt_to_phys((unsigned long)(x));
|
|
|
|
}
|
|
|
|
|
2014-07-28 15:25:48 +00:00
|
|
|
#define phys_to_virt phys_to_virt
|
2012-03-05 11:49:27 +00:00
|
|
|
static inline void *phys_to_virt(phys_addr_t x)
|
|
|
|
{
|
|
|
|
return (void *)(__phys_to_virt(x));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Drivers should NOT use these either.
 */
#define __pa(x)			__virt_to_phys((unsigned long)(x))
#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define virt_to_pfn(x)		__phys_to_pfn(__virt_to_phys(x))
|
2012-03-05 11:49:27 +00:00
|
|
|
|
|
|
|
/*
 * virt_to_page(k) convert a _valid_ virtual address to struct page *
 * virt_addr_valid(k) indicates whether a virtual address is valid
 */
#define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)

#ifndef CONFIG_SPARSEMEM_VMEMMAP
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define _virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#else
/*
 * With SPARSEMEM_VMEMMAP the struct page array is a virtually
 * contiguous array based at VMEMMAP_START, so the conversions below are
 * pure arithmetic: mask off the region base (PAGE_OFFSET or
 * VMEMMAP_START), then scale between page-sized bytes and struct page
 * entries.
 */
#define __virt_to_pgoff(kaddr)	(((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
#define __page_to_voff(page)	(((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))

#define page_to_virt(page)	((void *)((__page_to_voff(page)) | PAGE_OFFSET))
#define virt_to_page(vaddr)	((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))

/* Translate to a PFN and check that it is backed by RAM. */
#define _virt_addr_valid(kaddr)	pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
					   + PHYS_OFFSET) >> PAGE_SHIFT)
#endif
|
2012-03-05 11:49:27 +00:00
|
|
|
#endif
|
|
|
|
|
2016-09-21 22:25:04 +00:00
|
|
|
/* Valid means: lies in the linear map AND its PFN is backed by RAM. */
#define _virt_addr_is_linear(kaddr)	(((u64)(kaddr)) >= PAGE_OFFSET)
#define virt_addr_valid(kaddr)		(_virt_addr_is_linear(kaddr) && \
					 _virt_addr_valid(kaddr))
|
|
|
|
|
2012-03-05 11:49:27 +00:00
|
|
|
#include <asm-generic/memory_model.h>
|
|
|
|
|
|
|
|
#endif
|