s390: setup kernel memory layout early

Currently there are two separate places where the kernel memory layout
has to be known and adjusted:
1. early kasan setup.
2. paging setup later.

Those two places had to be kept in sync and adjusted to reflect the
peculiar technical details of one another. With additional factors that
influence the kernel memory layout, such as the ultravisor secure
storage limit, the complexity of keeping the two in sync grew even
further.

Besides that, looking forward towards creating the identity mapping and
enabling DAT before jumping into the uncompressed kernel would also
require full knowledge of and control over the kernel memory layout.

So, de-duplicate and move kernel memory layout setup logic into
the decompressor.

Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Vasily Gorbik 2020-10-06 22:12:39 +02:00
parent b5415c8f97
commit 0c4f2623b9
10 changed files with 136 additions and 96 deletions


@@ -24,6 +24,7 @@ void __printf(1, 2) decompressor_printk(const char *fmt, ...);
extern const char kernel_version[];
extern unsigned long memory_limit;
extern unsigned long vmalloc_size;
extern int vmalloc_size_set;
extern int kaslr_enabled;


@@ -12,13 +12,13 @@
#include "boot.h"
char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
struct ipl_parameter_block __bootdata_preserved(ipl_block);
int __bootdata_preserved(ipl_block_valid);
unsigned int __bootdata_preserved(zlib_dfltcc_support) = ZLIB_DFLTCC_FULL;
unsigned long __bootdata(vmalloc_size) = VMALLOC_DEFAULT_SIZE;
int __bootdata(noexec_disabled);
unsigned int __bootdata_preserved(zlib_dfltcc_support) = ZLIB_DFLTCC_FULL;
struct ipl_parameter_block __bootdata_preserved(ipl_block);
int __bootdata_preserved(ipl_block_valid);
unsigned long vmalloc_size = VMALLOC_DEFAULT_SIZE;
unsigned long memory_limit;
int vmalloc_size_set;
int kaslr_enabled;

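A note on the __bootdata and __bootdata_preserved markers used throughout this commit: as I understand the s390 boot protocol, each marked variable lands in its own named .boot.data.* or .boot.preserved.data.* section, both the decompressor and vmlinux carry identically sorted copies of these sections, and the decompressor copies its values into the kernel image before jumping to it (the "preserved" variant, as I understand it, additionally stays accessible after early init). A rough sketch of the idea, not taken from this diff; see arch/s390/include/asm/sections.h for the real macro definitions:

/* Illustrative approximation of the boot-data markers (not part of this commit) */
#define __bootdata(var)            __section(".boot.data." #var) var
#define __bootdata_preserved(var)  __section(".boot.preserved.data." #var) var

/* decompressor side: value computed by setup_kernel_memory_layout() */
unsigned long __bootdata_preserved(VMALLOC_START);

/* vmlinux side: same declaration, so the copied section contents land in a
 * variable of the same name and type in the decompressed kernel */
extern unsigned long __bootdata_preserved(VMALLOC_START);

This is why the new layout code in the decompressor can simply assign to VMALLOC_START, MODULES_END and friends and have the uncompressed kernel observe the same values.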

@@ -5,6 +5,7 @@
#include <asm/sections.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
@@ -15,6 +16,12 @@
extern char __boot_data_start[], __boot_data_end[];
extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata(ident_map_size);
u64 __bootdata_preserved(stfle_fac_list[16]);
@@ -172,6 +179,86 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
#endif
}

static void setup_kernel_memory_layout(void)
{
        bool vmalloc_size_verified = false;
        unsigned long vmemmap_off;
        unsigned long vspace_left;
        unsigned long rte_size;
        unsigned long pages;
        unsigned long vmax;

        pages = ident_map_size / PAGE_SIZE;
        /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
        vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);

        /* choose kernel address space layout: 4 or 3 levels. */
        vmemmap_off = round_up(ident_map_size, _REGION3_SIZE);
        if (IS_ENABLED(CONFIG_KASAN) ||
            vmalloc_size > _REGION2_SIZE ||
            vmemmap_off + vmemmap_size + vmalloc_size + MODULES_LEN > _REGION2_SIZE)
                vmax = _REGION1_SIZE;
        else
                vmax = _REGION2_SIZE;

        /* keep vmemmap_off aligned to a top level region table entry */
        rte_size = vmax == _REGION1_SIZE ? _REGION2_SIZE : _REGION3_SIZE;
        MODULES_END = vmax;
        if (is_prot_virt_host()) {
                /*
                 * forcing modules and vmalloc area under the ultravisor
                 * secure storage limit, so that any vmalloc allocation
                 * we do could be used to back secure guest storage.
                 */
                adjust_to_uv_max(&MODULES_END);
        }
#ifdef CONFIG_KASAN
        if (MODULES_END < vmax) {
                /* force vmalloc and modules below kasan shadow */
                MODULES_END = min(MODULES_END, KASAN_SHADOW_START);
        } else {
                /*
                 * leave vmalloc and modules above kasan shadow but make
                 * sure they don't overlap with it
                 */
                vmalloc_size = min(vmalloc_size, vmax - KASAN_SHADOW_END - MODULES_LEN);
                vmalloc_size_verified = true;
                vspace_left = KASAN_SHADOW_START;
        }
#endif
        MODULES_VADDR = MODULES_END - MODULES_LEN;
        VMALLOC_END = MODULES_VADDR;

        if (vmalloc_size_verified) {
                VMALLOC_START = VMALLOC_END - vmalloc_size;
        } else {
                vmemmap_off = round_up(ident_map_size, rte_size);

                if (vmemmap_off + vmemmap_size > VMALLOC_END ||
                    vmalloc_size > VMALLOC_END - vmemmap_off - vmemmap_size) {
                        /*
                         * allow vmalloc area to occupy up to 1/2 of
                         * the rest virtual space left.
                         */
                        vmalloc_size = min(vmalloc_size, VMALLOC_END / 2);
                }
                VMALLOC_START = VMALLOC_END - vmalloc_size;
                vspace_left = VMALLOC_START;
        }

        pages = vspace_left / (PAGE_SIZE + sizeof(struct page));
        pages = SECTION_ALIGN_UP(pages);
        vmemmap_off = round_up(vspace_left - pages * sizeof(struct page), rte_size);
        /* keep vmemmap left most starting from a fresh region table entry */
        vmemmap_off = min(vmemmap_off, round_up(ident_map_size, rte_size));
        /* take care that identity map is lower then vmemmap */
        ident_map_size = min(ident_map_size, vmemmap_off);
        vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
        VMALLOC_START = max(vmemmap_off + vmemmap_size, VMALLOC_START);
        vmemmap = (struct page *)vmemmap_off;
}

/*
* This function clears the BSS section of the decompressed Linux kernel and NOT the decompressor's.
*/
@@ -211,6 +298,7 @@ void startup_kernel(void)
parse_boot_command_line();
setup_ident_map_size(detect_memory());
setup_vmalloc_size();
setup_kernel_memory_layout();
random_lma = __kaslr_offset = 0;
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {

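To make the new setup_kernel_memory_layout() logic more concrete, here is a small stand-alone sketch (not part of the commit) that walks the 3-versus-4-level decision for one hypothetical machine. MODULES_LEN and the default vmalloc size are taken from the pgtable.h hunk further down; the region sizes, the 64-byte struct page and the round_up_to() helper are assumptions of mine, and the SECTION_ALIGN_UP rounding of the real code is left out:

#include <stdio.h>

#define PAGE_SIZE        (1UL << 12)   /* 4 KiB */
#define _REGION3_SIZE    (1UL << 31)   /* 2 GiB  - assumed */
#define _REGION2_SIZE    (1UL << 42)   /* 4 TiB  - address space with 3-level tables, assumed */
#define _REGION1_SIZE    (1UL << 53)   /* 8 PiB  - address space with 4-level tables, assumed */
#define MODULES_LEN      (1UL << 31)   /* 2 GiB module area (from pgtable.h) */
#define STRUCT_PAGE_SIZE 64UL          /* assumed sizeof(struct page) */

static unsigned long round_up_to(unsigned long x, unsigned long a)
{
        return (x + a - 1) & ~(a - 1);          /* a must be a power of two */
}

int main(void)
{
        unsigned long ident_map_size = 64UL << 30;                 /* say, 64 GiB of memory */
        unsigned long vmalloc_size = (512UL << 30) - MODULES_LEN;  /* VMALLOC_DEFAULT_SIZE */
        unsigned long vmemmap_size, vmemmap_off, vmax;

        /* one struct page per 4 KiB page of the identity mapping */
        vmemmap_size = (ident_map_size / PAGE_SIZE) * STRUCT_PAGE_SIZE;
        /* place vmemmap at the next region-third boundary above the identity map */
        vmemmap_off = round_up_to(ident_map_size, _REGION3_SIZE);

        /* the real code also forces 4 levels for CONFIG_KASAN or an oversized vmalloc_size */
        if (vmemmap_off + vmemmap_size + vmalloc_size + MODULES_LEN > _REGION2_SIZE)
                vmax = _REGION1_SIZE;           /* does not fit below 4 TiB: 4 levels */
        else
                vmax = _REGION2_SIZE;           /* 3 levels are enough */

        printf("%lu GiB identity map, %lu MiB vmemmap -> %s\n",
               ident_map_size >> 30, vmemmap_size >> 20,
               vmax == _REGION1_SIZE ? "4-level layout" : "3-level layout");
        return 0;
}

For these numbers the sum is roughly 577 GiB, comfortably below _REGION2_SIZE, so the kernel stays with a 3-level layout: identity map at the bottom, vmemmap above it, then the vmalloc area, and the 2 GiB module area ending at vmax.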

@@ -44,3 +44,28 @@ void uv_query_info(void)
prot_virt_guest = 1;
#endif
}

#if IS_ENABLED(CONFIG_KVM)
static bool has_uv_sec_stor_limit(void)
{
        /*
         * keep these conditions in line with setup_uv()
         */
        if (!is_prot_virt_host())
                return false;

        if (is_prot_virt_guest())
                return false;

        if (!test_facility(158))
                return false;

        return !!uv_info.max_sec_stor_addr;
}

void adjust_to_uv_max(unsigned long *vmax)
{
        if (has_uv_sec_stor_limit())
                *vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
}
#endif
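
For a feel of the ultravisor clamp, a tiny stand-alone sketch (not part of the commit); the 8 TiB limit is invented and adjust_to_uv_max() is re-implemented here in simplified form, whereas the real one above goes through has_uv_sec_stor_limit() and uv_info:

#include <stdio.h>

static unsigned long max_sec_stor_addr = 8UL << 40;    /* pretend UV limit: 8 TiB */

/* simplified stand-in for the adjust_to_uv_max() added above */
static void adjust_to_uv_max(unsigned long *vmax)
{
        if (max_sec_stor_addr && *vmax > max_sec_stor_addr)
                *vmax = max_sec_stor_addr;
}

int main(void)
{
        unsigned long modules_end = 1UL << 53;  /* start from the 4-level vmax */

        adjust_to_uv_max(&modules_end);
        printf("MODULES_END clamped to %lu TiB\n", modules_end >> 40);
        return 0;
}

setup_kernel_memory_layout() then carves the module, vmalloc and vmemmap areas out below that clamped address, so that any vmalloc allocation can be used to back secure guest storage, as the comment in the layout code notes.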


@@ -16,7 +16,6 @@
extern void kasan_early_init(void);
extern void kasan_copy_shadow_mapping(void);
extern void kasan_free_early_identity(void);
extern unsigned long kasan_vmax;
/*
* Estimate kasan memory requirements, which it will reserve


@@ -17,6 +17,7 @@
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/sections.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>
@@ -86,16 +87,16 @@ extern unsigned long zero_page_mask;
* happen without trampolines and in addition the placement within a
* 2GB frame is branch prediction unit friendly.
*/
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern unsigned long __bootdata_preserved(VMALLOC_START);
extern unsigned long __bootdata_preserved(VMALLOC_END);
#define VMALLOC_DEFAULT_SIZE ((512UL << 30) - MODULES_LEN)
extern struct page *vmemmap;
extern unsigned long vmemmap_size;
extern struct page *__bootdata_preserved(vmemmap);
extern unsigned long __bootdata_preserved(vmemmap_size);
#define VMEM_MAX_PHYS ((unsigned long) vmemmap)
extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
extern unsigned long __bootdata_preserved(MODULES_VADDR);
extern unsigned long __bootdata_preserved(MODULES_END);
#define MODULES_VADDR MODULES_VADDR
#define MODULES_END MODULES_END
#define MODULES_LEN (1UL << 31)


@@ -89,7 +89,6 @@ extern unsigned int zlib_dfltcc_support;
extern int noexec_disabled;
extern unsigned long ident_map_size;
extern unsigned long vmalloc_size;
/* The Write Back bit position in the physaddr is given by the SLPC PCI */
extern unsigned long mio_wb_bit_mask;


@@ -96,7 +96,6 @@ unsigned long int_hwcap = 0;
int __bootdata(noexec_disabled);
unsigned long __bootdata(ident_map_size);
unsigned long __bootdata(vmalloc_size);
struct mem_detect_info __bootdata(mem_detect);
struct exception_table_entry *__bootdata_preserved(__start_dma_ex_table);
@@ -545,53 +544,10 @@ static void __init setup_resources(void)
#endif
}
static void __init setup_ident_map_size(void)
static void __init setup_memory_end(void)
{
unsigned long vmax, tmp;
/* Choose kernel address space layout: 3 or 4 levels. */
tmp = ident_map_size / PAGE_SIZE;
tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
vmax = _REGION2_SIZE; /* 3-level kernel page table */
else
vmax = _REGION1_SIZE; /* 4-level kernel page table */
/* module area is at the end of the kernel address space. */
MODULES_END = vmax;
if (is_prot_virt_host())
adjust_to_uv_max(&MODULES_END);
#ifdef CONFIG_KASAN
vmax = _REGION1_SIZE;
MODULES_END = kasan_vmax;
#endif
MODULES_VADDR = MODULES_END - MODULES_LEN;
VMALLOC_END = MODULES_VADDR;
VMALLOC_START = VMALLOC_END - vmalloc_size;
/* Split remaining virtual space between 1:1 mapping & vmemmap array */
tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
tmp = SECTION_ALIGN_UP(tmp);
tmp = VMALLOC_START - tmp * sizeof(struct page);
tmp &= ~((vmax >> 11) - 1); /* align to page table level */
tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
vmemmap = (struct page *) tmp;
/* Take care that ident_map_size <= vmemmap */
ident_map_size = min(ident_map_size, (unsigned long)vmemmap);
#ifdef CONFIG_KASAN
ident_map_size = min(ident_map_size, KASAN_SHADOW_START);
#endif
vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
#ifdef CONFIG_KASAN
/* move vmemmap above kasan shadow only if stands in a way */
if (KASAN_SHADOW_END > (unsigned long)vmemmap &&
(unsigned long)vmemmap + vmemmap_size > KASAN_SHADOW_START)
vmemmap = max(vmemmap, (struct page *)KASAN_SHADOW_END);
#endif
max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
memblock_remove(ident_map_size, ULONG_MAX);
max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
pr_notice("The maximum memory size is %luMB\n", ident_map_size >> 20);
}
@@ -1132,7 +1088,7 @@ void __init setup_arch(char **cmdline_p)
remove_oldmem();
setup_uv();
setup_ident_map_size();
setup_memory_end();
setup_memory();
dma_contiguous_reserve(ident_map_size);
vmcp_cma_reserve();


@@ -52,7 +52,7 @@ void __init setup_uv(void)
unsigned long uv_stor_base;
/*
* keep these conditions in line with kasan init code has_uv_sec_stor_limit()
* keep these conditions in line with has_uv_sec_stor_limit()
*/
if (!is_prot_virt_host())
return;
@@ -91,12 +91,6 @@ fail:
prot_virt_host = 0;
}
void adjust_to_uv_max(unsigned long *vmax)
{
if (uv_info.max_sec_stor_addr)
*vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
}
/*
* Requests the Ultravisor to pin the page in the shared state. This will
* cause an intercept when the guest attempts to unshare the pinned page.


@@ -13,7 +13,6 @@
#include <asm/setup.h>
#include <asm/uv.h>
unsigned long kasan_vmax;
static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static unsigned long pgalloc_pos __initdata;
@@ -251,28 +250,9 @@ static void __init kasan_early_detect_facilities(void)
}
}
static bool __init has_uv_sec_stor_limit(void)
{
/*
* keep these conditions in line with setup_uv()
*/
if (!is_prot_virt_host())
return false;
if (is_prot_virt_guest())
return false;
if (!test_facility(158))
return false;
return !!uv_info.max_sec_stor_addr;
}
void __init kasan_early_init(void)
{
unsigned long untracked_mem_end;
unsigned long shadow_alloc_size;
unsigned long vmax_unlimited;
unsigned long initrd_end;
unsigned long memsize;
unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
@@ -306,9 +286,6 @@ void __init kasan_early_init(void)
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
crst_table_init((unsigned long *)early_pg_dir, _REGION2_ENTRY_EMPTY);
untracked_mem_end = kasan_vmax = vmax_unlimited = _REGION1_SIZE;
if (has_uv_sec_stor_limit())
kasan_vmax = min(vmax_unlimited, uv_info.max_sec_stor_addr);
/* init kasan zero shadow */
crst_table_init((unsigned long *)kasan_early_shadow_p4d,
@@ -375,18 +352,18 @@
*/
/* populate kasan shadow (for identity mapping and zero page mapping) */
kasan_early_pgtable_populate(__sha(0), __sha(memsize), POPULATE_MAP);
if (IS_ENABLED(CONFIG_MODULES))
untracked_mem_end = kasan_vmax - MODULES_LEN;
if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
untracked_mem_end = kasan_vmax - vmalloc_size - MODULES_LEN;
/* shallowly populate kasan shadow for vmalloc and modules */
kasan_early_pgtable_populate(__sha(untracked_mem_end), __sha(kasan_vmax),
kasan_early_pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END),
POPULATE_SHALLOW);
}
/* populate kasan shadow for untracked memory */
kasan_early_pgtable_populate(__sha(ident_map_size), __sha(untracked_mem_end),
kasan_early_pgtable_populate(__sha(ident_map_size),
IS_ENABLED(CONFIG_KASAN_VMALLOC) ?
__sha(VMALLOC_START) :
__sha(MODULES_VADDR),
POPULATE_ZERO_SHADOW);
kasan_early_pgtable_populate(__sha(kasan_vmax), __sha(vmax_unlimited),
kasan_early_pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE),
POPULATE_ZERO_SHADOW);
/* memory allocated for identity mapping structs will be freed later */
pgalloc_freeable = pgalloc_pos;
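
Summarizing the kasan_early_init() changes above: the shadow is now populated purely from the layout exported by the decompressor instead of a locally computed kasan_vmax. In terms of the original (unshadowed) addresses passed to __sha(), a CONFIG_KASAN_VMALLOC=y configuration ends up with:

  [0, memsize)                     real shadow pages         (POPULATE_MAP)
  [ident_map_size, VMALLOC_START)  zero shadow               (POPULATE_ZERO_SHADOW)
  [VMALLOC_START, MODULES_END)     shallow shadow for the
                                   vmalloc and module areas  (POPULATE_SHALLOW)
  [MODULES_END, _REGION1_SIZE)     zero shadow               (POPULATE_ZERO_SHADOW)

With CONFIG_KASAN_VMALLOC disabled, the shallow populate is skipped and the zero-shadow range ends at MODULES_VADDR instead of VMALLOC_START.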