mirror of
https://github.com/torvalds/linux.git
synced 2024-12-28 13:51:44 +00:00
a257e02579
Working around Cortex-A53 erratum #843419 involves special handling of ADRP instructions that end up in the last two instruction slots of a 4k page, or whose output register gets overwritten without having been read. (Note that the latter instruction sequence is never emitted by a properly functioning compiler, which is why it is disregarded by the handling of the same erratum in the bfd.ld linker which we rely on for the core kernel) Normally, this gets taken care of by the linker, which can spot such sequences at final link time, and insert a veneer if the ADRP ends up at a vulnerable offset. However, linux kernel modules are partially linked ELF objects, and so there is no 'final link time' other than the runtime loading of the module, at which time all the static relocations are resolved. For this reason, we have implemented the #843419 workaround for modules by avoiding ADRP instructions altogether, by using the large C model, and by passing -mpc-relative-literal-loads to recent versions of GCC that may emit adrp/ldr pairs to perform literal loads. However, this workaround forces us to keep literal data mixed with the instructions in the executable .text segment, and literal data may inadvertently turn into an exploitable speculative gadget depending on the relative offsets of arbitrary symbols. So let's reimplement this workaround in a way that allows us to switch back to the small C model, and to drop the -mpc-relative-literal-loads GCC switch, by patching affected ADRP instructions at runtime: - ADRP instructions that do not appear at 4k relative offset 0xff8 or 0xffc are ignored - ADRP instructions that are within 1 MB of their target symbol are converted into ADR instructions - remaining ADRP instructions are redirected via a veneer that performs the load using an unaffected movn/movk sequence. Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> [will: tidied up ADRP -> ADR instruction patching.] 
[will: use ULL suffix for 64-bit immediate] Signed-off-by: Will Deacon <will.deacon@arm.com>
95 lines
2.8 KiB
C
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_MODULE_H
#define __ASM_MODULE_H

#include <asm-generic/module.h>

/* Architecture tag appended to the module vermagic string. */
#define MODULE_ARCH_VERMAGIC	"aarch64"

#ifdef CONFIG_ARM64_MODULE_PLTS
/*
 * Bookkeeping for one PLT (veneer) section of a module: the ELF section
 * that backs the entries, plus fill level and capacity.
 */
struct mod_plt_sec {
	struct elf64_shdr	*plt;			/* ELF section holding the PLT entries */
	int			plt_num_entries;	/* entries emitted so far */
	int			plt_max_entries;	/* capacity of the section */
};
/* Per-module arm64 state embedded in struct module (via asm-generic). */
struct mod_arch_specific {
	struct mod_plt_sec	core;	/* PLTs for core sections (live for module lifetime) */
	struct mod_plt_sec	init;	/* PLTs for init sections — presumably freed after init; confirm in module.c */

	/* for CONFIG_DYNAMIC_FTRACE */
	struct plt_entry	*ftrace_trampoline;
};
#endif	/* CONFIG_ARM64_MODULE_PLTS */
/*
 * Emit (or reuse) a PLT entry for an out-of-range branch relocation in
 * module @mod, at relocation site @loc. Returns the address of the veneer.
 */
u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
			  Elf64_Sym *sym);

/*
 * Emit a veneer for an ADRP instruction at @loc that is affected by
 * Cortex-A53 erratum #843419; the veneer materializes @val without using
 * ADRP. Returns the address of the veneer.
 */
u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val);
/*
 * Base of the module vmalloc region. With KASLR it is chosen at boot;
 * otherwise it sits a fixed MODULES_VSIZE below the end of the kernel text.
 */
#ifdef CONFIG_RANDOMIZE_BASE
extern u64 module_alloc_base;
#else
#define module_alloc_base	((u64)_etext - MODULES_VSIZE)
#endif
/* One PLT veneer: load a 48-bit target into x16 with movn/movk, then branch. */
struct plt_entry {
	/*
	 * A program that conforms to the AArch64 Procedure Call Standard
	 * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
	 * IP1 (x17) may be inserted at any branch instruction that is
	 * exposed to a relocation that supports long branches. Since that
	 * is exactly what we are dealing with here, we are free to use x16
	 * as a scratch register in the PLT veneers.
	 */
	__le32	mov0;	/* movn	x16, #0x....			*/
	__le32	mov1;	/* movk	x16, #0x...., lsl #16		*/
	__le32	mov2;	/* movk	x16, #0x...., lsl #32		*/
	__le32	br;	/* br	x16				*/
};
/*
 * Build the four instruction words of a PLT veneer that transfers control
 * to @val. The 48-bit address is loaded into x16 in three 16-bit chunks:
 * a movn of the inverted low halfword followed by two movk instructions.
 */
static inline struct plt_entry get_plt_entry(u64 val)
{
	/*
	 * MOVK/MOVN/MOVZ opcode:
	 * +--------+------------+--------+-----------+-------------+---------+
	 * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
	 * +--------+------------+--------+-----------+-------------+---------+
	 *
	 * Rd  := 0x10 (x16)
	 * hw  := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
	 * opc := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
	 * sf  := 1 (64-bit variant)
	 */
	u64 imm_lo  = ~val & 0xffff;		/* movn inverts, so pre-invert bits [15:0] */
	u64 imm_mid = (val >> 16) & 0xffff;	/* bits [31:16] */
	u64 imm_hi  = (val >> 32) & 0xffff;	/* bits [47:32] */

	return (struct plt_entry){
		.mov0 = cpu_to_le32(0x92800010 | imm_lo << 5),
		.mov1 = cpu_to_le32(0xf2a00010 | imm_mid << 5),
		.mov2 = cpu_to_le32(0xf2c00010 | imm_hi << 5),
		.br   = cpu_to_le32(0xd61f0200),
	};
}
static inline bool plt_entries_equal(const struct plt_entry *a,
|
|
const struct plt_entry *b)
|
|
{
|
|
return a->mov0 == b->mov0 &&
|
|
a->mov1 == b->mov1 &&
|
|
a->mov2 == b->mov2;
|
|
}
|
|
|
|
#endif /* __ASM_MODULE_H */
|