/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BOOT_BOOT_H
#define BOOT_BOOT_H

#include <linux/types.h>

#define IPL_START 0x200

#ifndef __ASSEMBLY__
#include <asm/physmem_info.h>
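
/* Machine facility flags (EDAT1, EDAT2, NX) detected during early boot. */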
struct machine_info {
	unsigned char has_edat1 : 1;
	unsigned char has_edat2 : 1;
	unsigned char has_nx : 1;
};
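
/*
 * Layout description of the uncompressed kernel image (offsets and sizes
 * of its sections), used by the decompressor when placing the kernel.
 */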
struct vmlinux_info {
	unsigned long entry;
	unsigned long image_size;	/* does not include .bss */
	unsigned long bss_size;		/* uncompressed image .bss size */
	unsigned long bootdata_off;
	unsigned long bootdata_size;
	unsigned long bootdata_preserved_off;
	unsigned long bootdata_preserved_size;
	unsigned long got_start;
	unsigned long got_end;
	unsigned long amode31_size;
	unsigned long init_mm_off;
	unsigned long swapper_pg_dir_off;
	unsigned long invalid_pg_dir_off;
	unsigned long alt_instructions;
	unsigned long alt_instructions_end;
#ifdef CONFIG_KASAN
	unsigned long kasan_early_shadow_page_off;
	unsigned long kasan_early_shadow_pte_off;
	unsigned long kasan_early_shadow_pmd_off;
	unsigned long kasan_early_shadow_pud_off;
	unsigned long kasan_early_shadow_p4d_off;
#endif
};
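
/* C entry point of the decompressor, reached from the assembly startup code. */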
void startup_kernel(void);
unsigned long detect_max_physmem_end(void);
void detect_physmem_online_ranges(unsigned long max_physmem_end);
void physmem_set_usable_limit(unsigned long limit);
void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size);
void physmem_free(enum reserved_range_type type);
/* for continuous/multiple allocations per type */
unsigned long physmem_alloc_top_down(enum reserved_range_type type, unsigned long size,
				     unsigned long align);
/* for single allocations, 1 per type */
unsigned long physmem_alloc_range(enum reserved_range_type type, unsigned long size,
				  unsigned long align, unsigned long min, unsigned long max,
				  bool die_on_oom);
unsigned long get_physmem_alloc_pos(void);
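
/*
 * Illustrative sketch only (not part of the original header): a caller in
 * the decompressor either reserves a fixed range for a reserved_range_type
 * or asks the allocator for memory within constraints, for example:
 *
 *	physmem_reserve(type, addr, size);
 *	addr = physmem_alloc_range(type, size, align, min, max, true);
 *
 * See asm/physmem_info.h for the actual reserved_range_type values.
 */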
bool ipl_report_certs_intersects(unsigned long addr, unsigned long size,
				 unsigned long *intersection_start);
bool is_ipl_block_dump(void);
void store_ipl_parmblock(void);
int read_ipl_report(void);
void save_ipl_cert_comp_list(void);
void setup_boot_command_line(void);
void parse_boot_command_line(void);
void verify_facilities(void);
void print_missing_facilities(void);
void sclp_early_setup_buffer(void);
void print_pgm_check_info(void);
unsigned long randomize_within_range(unsigned long size, unsigned long align,
				     unsigned long min, unsigned long max);
void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned long asce_limit);
void __printf(1, 2) boot_printk(const char *fmt, ...);
void print_stacktrace(unsigned long sp);
void error(char *m);
int get_random(unsigned long limit, unsigned long *value);
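
/*
 * Assumed convention, for illustration only: get_random() is expected to
 * return 0 on success and store a value below @limit, e.g.
 *
 *	unsigned long rnd;
 *
 *	if (!get_random(max - min, &rnd))
 *		base = min + rnd;
 */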
extern struct machine_info machine;
/* Symbols defined by linker scripts */
extern const char kernel_version[];
extern unsigned long memory_limit;
extern unsigned long vmalloc_size;
extern int vmalloc_size_set;
extern char __boot_data_start[], __boot_data_end[];
extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
extern char __vmlinux_relocs_64_start[], __vmlinux_relocs_64_end[];
extern char _decompressor_syms_start[], _decompressor_syms_end[];
extern char _stack_start[], _stack_end[];
extern char _end[], _decompressor_end[];
extern unsigned char _compressed_start[];
extern unsigned char _compressed_end[];
extern struct vmlinux_info _vmlinux_info;
#define vmlinux _vmlinux_info
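
/*
 * The lowcore (prefix area) normally sits at address 0 but may be remapped;
 * these helpers turn a pointer into the (possibly remapped) lowcore or the
 * absolute lowcore back into its physical address, which during boot equals
 * the offset within struct lowcore.
 */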
#define __lowcore_pa(x) ((unsigned long)(x) % sizeof(struct lowcore))
#define __abs_lowcore_pa(x) (((unsigned long)(x) - __abs_lowcore) % sizeof(struct lowcore))
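
/*
 * Translation between kernel image addresses and the identity mapping:
 *
 *	kernel:   phys == (virt - __kaslr_offset) + __kaslr_offset_phys
 *	identity: virt == phys + __identity_base
 */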
#define __kernel_va(x) ((void *)((unsigned long)(x) - __kaslr_offset_phys + __kaslr_offset))
#define __kernel_pa(x) ((unsigned long)(x) - __kaslr_offset + __kaslr_offset_phys)
#define __identity_va(x) ((void *)((unsigned long)(x) + __identity_base))
#define __identity_pa(x) ((unsigned long)(x) - __identity_base)
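
/* True if the ranges [addr0, addr0 + size0) and [addr1, addr1 + size1) overlap. */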
static inline bool intersects(unsigned long addr0, unsigned long size0,
			      unsigned long addr1, unsigned long size1)
{
	return addr0 + size0 > addr1 && addr1 + size1 > addr0;
}
#endif /* __ASSEMBLY__ */
#endif /* BOOT_BOOT_H */