b611303811
Resume from hibernate needs to clean any text executed by the kernel with the MMU off to the PoC. Collect these functions together into the .idmap.text section as all this code is tightly coupled and also needs the same cleaning after resume. Data is more complicated, secondary_holding_pen_release is written with the MMU on, clean and invalidated, then read with the MMU off. In contrast __boot_cpu_mode is written with the MMU off, the corresponding cache line is invalidated, so when we read it with the MMU on we don't get stale data. These cache maintenance operations conflict with each other if the values are within a Cache Writeback Granule (CWG) of each other. Collect the data into two sections .mmuoff.data.read and .mmuoff.data.write, the linker script ensures mmuoff.data.write section is aligned to the architectural maximum CWG of 2KB. Signed-off-by: James Morse <james.morse@arm.com> Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org> Cc: Mark Rutland <mark.rutland@arm.com> Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com>
242 lines
5.6 KiB
ArmAsm
/*
 * ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */
|
|
|
|
#include <asm-generic/vmlinux.lds.h>
|
|
#include <asm/cache.h>
|
|
#include <asm/kernel-pgtable.h>
|
|
#include <asm/thread_info.h>
|
|
#include <asm/memory.h>
|
|
#include <asm/page.h>
|
|
#include <asm/pgtable.h>
|
|
|
|
#include "image.h"
|
|
|
|
/*
 * .exit.text needed in case of alternative patching: the alternatives
 * framework may patch instructions inside exit code, so it must still be
 * present in the image rather than discarded at link time.
 */
#define ARM_EXIT_KEEP(x)	x
/* Expands to nothing: input sections passed here are discarded below. */
#define ARM_EXIT_DISCARD(x)
|
|
|
|
OUTPUT_ARCH(aarch64)
/* Image entry point; defined at the start of .head.text below. */
ENTRY(_text)

/* jiffies is an alias for the 64-bit jiffies counter. */
jiffies = jiffies_64;
|
|
|
|
/*
 * Collect the EL2 hypervisor text between start/end marker symbols.
 * .hyp.idmap.text (the HYP init code) is delimited separately from the
 * rest of .hyp.text.
 */
#define HYPERVISOR_TEXT					\
	/*						\
	 * Align to 4 KB so that			\
	 * a) the HYP vector table is at its minimum	\
	 *    alignment of 2048 bytes			\
	 * b) the HYP init code will not cross a page	\
	 *    boundary if its size does not exceed	\
	 *    4 KB (see related ASSERT() below)		\
	 */						\
	. = ALIGN(SZ_4K);				\
	VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;	\
	*(.hyp.idmap.text)				\
	VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;	\
	VMLINUX_SYMBOL(__hyp_text_start) = .;		\
	*(.hyp.text)					\
	VMLINUX_SYMBOL(__hyp_text_end) = .;
|
|
|
|
/*
 * Code that runs with the MMU off is collected into .idmap.text so it can
 * be cleaned to the PoC as one unit (e.g. on resume from hibernate).
 * 4 KB alignment keeps it within a page (see the related ASSERT() below).
 */
#define IDMAP_TEXT					\
	. = ALIGN(SZ_4K);				\
	VMLINUX_SYMBOL(__idmap_text_start) = .;		\
	*(.idmap.text)					\
	VMLINUX_SYMBOL(__idmap_text_end) = .;
|
|
|
|
#ifdef CONFIG_HIBERNATION
/*
 * Hibernate exit code runs from a copy outside the kernel mapping; keep it
 * page-aligned and delimited so it can be located and cleaned on resume
 * (size is checked by the ASSERT() below).
 */
#define HIBERNATE_TEXT					\
	. = ALIGN(SZ_4K);				\
	VMLINUX_SYMBOL(__hibernate_exit_text_start) = .;\
	*(.hibernate_exit.text)				\
	VMLINUX_SYMBOL(__hibernate_exit_text_end) = .;
#else
/* No hibernation support: contribute nothing to .text. */
#define HIBERNATE_TEXT
#endif
|
|
|
|
/*
 * The size of the PE/COFF section that covers the kernel image, which
 * runs from stext to _edata, must be a round multiple of the PE/COFF
 * FileAlignment, which we set to its minimum value of 0x200. 'stext'
 * itself is 4 KB aligned, so padding out _edata to a 0x200 aligned
 * boundary should be sufficient.
 */
PECOFF_FILE_ALIGNMENT = 0x200;

#ifdef CONFIG_EFI
/*
 * Emit at least one byte (BYTE(0)) so the padding section is never empty,
 * then round the location counter up to the PE/COFF file alignment.
 */
#define PECOFF_EDATA_PADDING	\
	.pecoff_edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGNMENT); }
#else
/* PE/COFF header only exists for EFI-bootable images. */
#define PECOFF_EDATA_PADDING
#endif
|
|
|
|
#if defined(CONFIG_DEBUG_ALIGN_RODATA)
/*
 * Stricter 2 MB segment alignment: lets each segment be mapped with few,
 * efficient page-table entries per granule size:
 *   4 KB granule: 1 level 2 entry
 *  16 KB granule: 128 level 3 entries, with contiguous bit
 *  64 KB granule: 32 level 3 entries, with contiguous bit
 */
#define SEGMENT_ALIGN			SZ_2M
#else
/*
 *   4 KB granule: 16 level 3 entries, with contiguous bit
 *  16 KB granule: 4 level 3 entries, without contiguous bit
 *  64 KB granule: 1 level 3 entry
 */
#define SEGMENT_ALIGN			SZ_64K
#endif
|
|
|
|
SECTIONS
{
	/*
	 * XXX: The linker does not define how output sections are
	 * assigned to input sections when there are multiple statements
	 * matching the same input section name.  There is no documented
	 * order of matching.
	 */
	/DISCARD/ : {
		ARM_EXIT_DISCARD(EXIT_TEXT)
		ARM_EXIT_DISCARD(EXIT_DATA)
		EXIT_CALL
		*(.discard)
		*(.discard.*)
		*(.interp .dynamic)
		*(.dynsym .dynstr .hash)
	}

	/* Kernel image starts TEXT_OFFSET bytes into the kernel VA space. */
	. = KIMAGE_VADDR + TEXT_OFFSET;

	/* Boot/head code must come first; _text marks the image start. */
	.head.text : {
		_text = .;
		HEAD_TEXT
	}
	.text : {			/* Real text segment		*/
		_stext = .;		/* Text and read-only data	*/
			__exception_text_start = .;
			*(.exception.text)
			__exception_text_end = .;
			IRQENTRY_TEXT
			SOFTIRQENTRY_TEXT
			ENTRY_TEXT
			TEXT_TEXT
			SCHED_TEXT
			LOCK_TEXT
			KPROBES_TEXT
			HYPERVISOR_TEXT
			IDMAP_TEXT
			HIBERNATE_TEXT
			*(.fixup)
			*(.gnu.warning)
		. = ALIGN(16);
		*(.got)			/* Global offset table		*/
	}

	. = ALIGN(SEGMENT_ALIGN);
	_etext = .;			/* End of text section */

	RO_DATA(PAGE_SIZE)		/* everything from this point to */
	EXCEPTION_TABLE(8)		/* __init_begin will be marked RO NX */
	NOTES

	/* Segment-aligned so the init region can be freed/unmapped later. */
	. = ALIGN(SEGMENT_ALIGN);
	__init_begin = .;

	INIT_TEXT_SECTION(8)
	/* Kept (not discarded): see ARM_EXIT_KEEP() comment above. */
	.exit.text : {
		ARM_EXIT_KEEP(EXIT_TEXT)
	}

	.init.data : {
		INIT_DATA
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		SECURITY_INITCALL
		INIT_RAM_FS
		*(.init.rodata.* .init.bss)	/* from the EFI stub */
	}
	.exit.data : {
		ARM_EXIT_KEEP(EXIT_DATA)
	}

	PERCPU_SECTION(L1_CACHE_BYTES)

	/* Alternative-instruction patch tables, consumed at boot. */
	. = ALIGN(4);
	.altinstructions : {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}
	.altinstr_replacement : {
		*(.altinstr_replacement)
	}
	/* Relocations for the (KASLR) relocatable image. */
	.rela : ALIGN(8) {
		*(.rela .rela*)
	}

	__rela_offset	= ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
	__rela_size	= SIZEOF(.rela);

	. = ALIGN(SEGMENT_ALIGN);
	__init_end = .;

	_data = .;
	_sdata = .;
	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)

	/*
	 * Data written with the MMU off but read with the MMU on requires
	 * cache lines to be invalidated, discarding up to a Cache Writeback
	 * Granule (CWG) of data from the cache. Keep the section that
	 * requires this type of maintenance to be in its own Cache Writeback
	 * Granule (CWG) area so the cache maintenance operations don't
	 * interfere with adjacent data.
	 */
	/* SZ_2K is the architectural maximum CWG. */
	.mmuoff.data.write : ALIGN(SZ_2K) {
		__mmuoff_data_start = .;
		*(.mmuoff.data.write)
	}
	/*
	 * Pad to the next CWG boundary so that invalidating .mmuoff.data.write
	 * cannot discard adjacent .mmuoff.data.read contents.
	 */
	. = ALIGN(SZ_2K);
	.mmuoff.data.read : {
		*(.mmuoff.data.read)
		__mmuoff_data_end = .;
	}

	PECOFF_EDATA_PADDING
	_edata = .;

	BSS_SECTION(0, 0, 0)

	/* Initial page tables: reserved space only, populated at boot. */
	. = ALIGN(PAGE_SIZE);
	idmap_pg_dir = .;
	. += IDMAP_DIR_SIZE;
	swapper_pg_dir = .;
	. += SWAPPER_DIR_SIZE;

	_end = .;

	STABS_DEBUG

	HEAD_SYMBOLS
}
|
|
|
|
/*
 * The HYP init code and ID map text can't be longer than a page each,
 * and should not cross a page boundary.
 * (Masking the start with ~(SZ_4K - 1) rounds it down to its page, so the
 * check also fails if the section straddles a page boundary.)
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
	"HYP init code too big or misaligned")
ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
	"ID map text too big or misaligned")
#ifdef CONFIG_HIBERNATION
ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
	<= SZ_4K, "Hibernate exit text too big or misaligned")
#endif

/*
 * If padding is applied before .head.text, virt<->phys conversions will fail.
 */
ASSERT(_text == (KIMAGE_VADDR + TEXT_OFFSET), "HEAD is misaligned")
|