// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Based on arch/arm/kernel/jump_label.c
 */
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include <asm/insn.h>
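
/*
 * Patch the jump label site described by @entry: write either a direct
 * branch to the entry's target (JUMP_LABEL_JMP) or a NOP over the
 * instruction at the entry's code address.
 */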
void arch_jump_label_transform(struct jump_entry *entry,
			       enum jump_label_type type)
{
	void *addr = (void *)jump_entry_code(entry);
	u32 insn;

	if (type == JUMP_LABEL_JMP) {
		insn = aarch64_insn_gen_branch_imm(jump_entry_code(entry),
						   jump_entry_target(entry),
						   AARCH64_INSN_BRANCH_NOLINK);
	} else {
		insn = aarch64_insn_gen_nop();
	}

	aarch64_insn_patch_text_nosync(addr, insn);
}

void arch_jump_label_transform_static(struct jump_entry *entry,
				      enum jump_label_type type)
{
	/*
	 * We use the architected A64 NOP in arch_static_branch, so there's no
	 * need to patch an identical A64 NOP over the top of it here. The core
	 * will call arch_jump_label_transform from a module notifier if the
	 * NOP needs to be replaced by a branch.
	 */
}
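
/*
 * For reference, a typical static-key user whose branch site gets patched by
 * the hooks above could look like the sketch below. This is an illustrative
 * example only; the key, function and helper names are made up.
 *
 * While the key is false, the static_branch_unlikely() site is the
 * architected A64 NOP emitted by arch_static_branch(). Enabling the key
 * makes the core jump label code call arch_jump_label_transform(), which
 * rewrites that NOP into a branch to the out-of-line slow path.
 *
 *	DEFINE_STATIC_KEY_FALSE(my_feature_key);
 *
 *	void my_fast_path(void)
 *	{
 *		if (static_branch_unlikely(&my_feature_key))
 *			my_slow_extra_work();
 *	}
 *
 *	static_branch_enable(&my_feature_key);
 */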