mirror of
https://github.com/torvalds/linux.git
commit 5ade5be4ed
The s390 architecture defines two special per-CPU data pages called the "prefix area". In s390-linux terminology this is usually called "lowcore". This memory area contains system configuration data like old/new PSWs for system call/interrupt/machine check handlers and lots of other data. It is normally mapped to logical address 0. This area can only be accessed when in supervisor mode, which means that kernel code can dereference NULL pointers, because accesses to address 0 are allowed. Parts of lowcore can be write protected, but read accesses and write accesses outside of the write protected areas are not caught.

To remove this limitation for debugging and testing, remap lowcore to another address and define a function get_lowcore() which simply returns the address where lowcore is mapped. This would normally introduce a pointer dereference (= memory read). As lowcore holds several very frequently used variables, add code to patch this function during runtime, so the memory reads are avoided.

For C code get_lowcore() has to be used; for assembly code it is the GET_LC macro. When using this macro/function a reference is added for alternative patching. All these locations will be patched to the actual lowcore location when the kernel is booted or a module is loaded.

To make debugging/bisecting problems easier, this patch adds all the infrastructure, but the lowcore address is still hardwired to 0. This way the code can be converted on a per-function basis, and the functionality is enabled in a follow-up patch after all the functions have been converted.

Note that this requires at least z16, because the old lpsw instruction only allowed a 12 bit displacement. z16 introduced lpswey, which allows a 20 bit (signed) displacement, so the lowcore can effectively be mapped anywhere in the range 0 - 0x7e000. To use 0x7e000 as the address, a 6 byte lgfi instruction would have to be used in the alternative. To save two bytes, llilh can be used, but it only allows setting bits 16-31 of the address (the loaded value is the immediate shifted left by 16). Therefore use 0x70000 as the alternative lowcore address. This is still large enough to catch NULL pointer dereferences into large arrays.

Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
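A minimal sketch of what the patched C accessor can look like, assuming the s390 ALTERNATIVE() inline-assembly macro; the names ALT_LOWCORE and LOWCORE_ALT_ADDRESS are illustrative assumptions, not taken from this patch (the GET_LC assembly macro would follow the same pattern):

static __always_inline struct lowcore *get_lowcore(void)
{
	struct lowcore *lc;

	/*
	 * The default path loads the hardwired address 0; alternative
	 * patching rewrites the instruction at boot to load the remapped
	 * address instead, so no memory read is needed. llilh sets bits
	 * 16-31 of the register, so with LOWCORE_ALT_ADDRESS = 0x70000
	 * the immediate is 7 (0x70000 >> 16), in 4 bytes instead of the
	 * 6 a lgfi would take. ALT_LOWCORE is an assumed feature name.
	 */
	asm_inline(
		ALTERNATIVE("	lghi	%[lc],0",
			    "	llilh	%[lc],%[alt]",
			    ALT_LOWCORE)
		: [lc] "=d" (lc)
		: [alt] "i" (LOWCORE_ALT_ADDRESS >> 16));
	return lc;
}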
48 lines
1.1 KiB
C
// SPDX-License-Identifier: GPL-2.0

#include <linux/pgtable.h>
#include <asm/abs_lowcore.h>

unsigned long __bootdata_preserved(__abs_lowcore);
int __bootdata_preserved(relocate_lowcore);
int abs_lowcore_map(int cpu, struct lowcore *lc, bool alloc)
{
	unsigned long addr = __abs_lowcore + (cpu * sizeof(struct lowcore));
	unsigned long phys = __pa(lc);
	int rc, i;

	for (i = 0; i < LC_PAGES; i++) {
		rc = __vmem_map_4k_page(addr, phys, PAGE_KERNEL, alloc);
		if (rc) {
			/*
			 * Do not unmap allocated page tables in case the
			 * allocation was not requested. In such a case the
			 * request is expected to come from an atomic context,
			 * while the unmap attempt might sleep.
			 */
			if (alloc) {
				for (--i; i >= 0; i--) {
					addr -= PAGE_SIZE;
					vmem_unmap_4k_page(addr);
				}
			}
			return rc;
		}
		addr += PAGE_SIZE;
		phys += PAGE_SIZE;
	}
	return 0;
}
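The alloc flag tells __vmem_map_4k_page() whether it may allocate page tables (and thus sleep); on failure, only the pages mapped by this call are unwound, and only when alloc was set. A hedged usage sketch, with the caller and the helper it invokes invented purely for illustration:

/*
 * Illustrative caller only: map the lowcore of a CPU from a sleepable
 * context (alloc = true) and undo the mapping if later setup fails.
 */
static int example_setup_cpu(int cpu, struct lowcore *lc)
{
	int rc;

	rc = abs_lowcore_map(cpu, lc, true);
	if (rc)
		return rc;
	rc = example_configure_cpu(cpu);	/* hypothetical helper */
	if (rc)
		abs_lowcore_unmap(cpu);
	return rc;
}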

void abs_lowcore_unmap(int cpu)
{
	unsigned long addr = __abs_lowcore + (cpu * sizeof(struct lowcore));
	int i;

	for (i = 0; i < LC_PAGES; i++) {
		vmem_unmap_4k_page(addr);
		addr += PAGE_SIZE;
	}
}
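Both functions derive the per-CPU address with the same expression: CPU n occupies sizeof(struct lowcore) bytes starting at __abs_lowcore + n * sizeof(struct lowcore), mapped as LC_PAGES individual 4k pages. A hypothetical helper (not part of the file) that makes this invariant explicit:

/*
 * Hypothetical helper, mirroring the expression both functions use:
 * the absolute-lowcore region is laid out as a simple array of
 * struct lowcore, indexed by CPU number.
 */
static inline unsigned long abs_lowcore_addr(int cpu)
{
	return __abs_lowcore + (cpu * sizeof(struct lowcore));
}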