x86: Undo return-thunk damage

Introduce X86_FEATURE_RETHUNK for those afflicted with needing this.

  [ bp: Do only INT3 padding - simpler. ]

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Author:    Peter Zijlstra <peterz@infradead.org>
Date:      2022-06-14 23:15:37 +02:00
Committer: Borislav Petkov <bp@suse.de>
parent 0b53c374b9
commit 15e67227c4
6 changed files with 78 additions and 2 deletions
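
[ Note: the whole transformation below is a five-byte in-place rewrite: the
compiler-emitted JMP rel32 to __x86_return_thunk (opcode 0xE9) becomes a bare
RET (0xC3), and the four now-dead displacement bytes are filled with INT3
(0xCC) so a stray jump into them traps. A minimal userspace sketch of that
rewrite; the opcode macro names mirror the kernel's <asm/text-patching.h>
constants, and the buffer contents are invented for illustration. ]

#include <stdio.h>

#define JMP32_INSN_OPCODE 0xE9	/* JMP rel32: E9 xx xx xx xx        */
#define RET_INSN_OPCODE   0xC3	/* near RET                         */
#define INT3_INSN_OPCODE  0xCC	/* INT3, pads the dead tail bytes   */

int main(void)
{
	/* a compiler-generated "JMP __x86_return_thunk" site */
	unsigned char site[5] = { JMP32_INSN_OPCODE, 0x10, 0x20, 0x30, 0x40 };

	/* the rewrite: first byte becomes RET, the rest become INT3 */
	site[0] = RET_INSN_OPCODE;
	for (int i = 1; i < (int)sizeof(site); i++)
		site[i] = INT3_INSN_OPCODE;

	for (int i = 0; i < (int)sizeof(site); i++)
		printf("%02X ", site[i]);
	printf("\n");	/* prints: C3 CC CC CC CC */
	return 0;
}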

diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h

@@ -76,6 +76,7 @@ extern int alternatives_patched;
 extern void alternative_instructions(void);
 extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
 extern void apply_retpolines(s32 *start, s32 *end);
+extern void apply_returns(s32 *start, s32 *end);
 extern void apply_ibt_endbr(s32 *start, s32 *end);
 
 struct module;

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h

@@ -300,6 +300,7 @@
 /* FREE!				(11*32+11) */
 #define X86_FEATURE_RETPOLINE		(11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
 #define X86_FEATURE_RETPOLINE_LFENCE	(11*32+13) /* "" Use LFENCE for Spectre variant 2 */
+#define X86_FEATURE_RETHUNK		(11*32+14) /* "" Use REturn THUNK */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI		(12*32+ 4) /* AVX VNNI instructions */

diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h

@@ -54,7 +54,8 @@
 # define DISABLE_RETPOLINE	0
 #else
 # define DISABLE_RETPOLINE	((1 << (X86_FEATURE_RETPOLINE & 31)) | \
-				 (1 << (X86_FEATURE_RETPOLINE_LFENCE & 31)))
+				 (1 << (X86_FEATURE_RETPOLINE_LFENCE & 31)) | \
+				 (1 << (X86_FEATURE_RETHUNK & 31)))
 #endif
 
 #ifdef CONFIG_INTEL_IOMMU_SVM

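[ Note: why the new bit also lands in DISABLE_RETPOLINE: feature bits in a
word's disabled mask read as constant-false through cpu_feature_enabled(), so
kernels built without CONFIG_RETPOLINE drop the RETHUNK code paths at compile
time. A standalone sketch of just the bit arithmetic, assuming a simplified
single-word model; the kernel's real check is the DISABLED_MASK_BIT_SET()
machinery in <asm/cpufeature.h>. ]

#include <stdbool.h>
#include <stdio.h>

#define X86_FEATURE_RETHUNK	(11*32 + 14)

/*
 * With CONFIG_RETPOLINE=n, word 11's disabled mask carries the three
 * retpoline-related bits (12, 13 and now 14), matching the hunk above.
 */
#define DISABLED_MASK11	((1u << 12) | (1u << 13) | (1u << 14))

static bool build_time_disabled(unsigned int feature)
{
	/* feature = word*32 + bit; only word 11 is modeled here */
	return (feature / 32) == 11 &&
	       (DISABLED_MASK11 & (1u << (feature % 32)));
}

int main(void)
{
	printf("RETHUNK compiled out: %d\n",
	       build_time_disabled(X86_FEATURE_RETHUNK));	/* prints 1 */
	return 0;
}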
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c

@@ -115,6 +115,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
 }
 
 extern s32 __retpoline_sites[], __retpoline_sites_end[];
+extern s32 __return_sites[], __return_sites_end[];
 extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[];
 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
 extern s32 __smp_locks[], __smp_locks_end[];

@@ -507,9 +508,67 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
 	}
 }
 
+/*
+ * Rewrite the compiler generated return thunk tail-calls.
+ *
+ * For example, convert:
+ *
+ *   JMP __x86_return_thunk
+ *
+ * into:
+ *
+ *   RET
+ */
+static int patch_return(void *addr, struct insn *insn, u8 *bytes)
+{
+	int i = 0;
+
+	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+		return -1;
+
+	bytes[i++] = RET_INSN_OPCODE;
+
+	for (; i < insn->length;)
+		bytes[i++] = INT3_INSN_OPCODE;
+
+	return i;
+}
+
+void __init_or_module noinline apply_returns(s32 *start, s32 *end)
+{
+	s32 *s;
+
+	for (s = start; s < end; s++) {
+		void *addr = (void *)s + *s;
+		struct insn insn;
+		int len, ret;
+		u8 bytes[16];
+		u8 op1;
+
+		ret = insn_decode_kernel(&insn, addr);
+		if (WARN_ON_ONCE(ret < 0))
+			continue;
+
+		op1 = insn.opcode.bytes[0];
+		if (WARN_ON_ONCE(op1 != JMP32_INSN_OPCODE))
+			continue;
+
+		DPRINTK("return thunk at: %pS (%px) len: %d to: %pS",
+			addr, addr, insn.length,
+			addr + insn.length + insn.immediate.value);
+
+		len = patch_return(addr, &insn, bytes);
+		if (len == insn.length) {
+			DUMP_BYTES(((u8*)addr), len, "%px: orig: ", addr);
+			DUMP_BYTES(((u8*)bytes), len, "%px: repl: ", addr);
+			text_poke_early(addr, bytes, len);
+		}
+	}
+}
+
 #else /* !CONFIG_RETPOLINE || !CONFIG_OBJTOOL */
 
 void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { }
+void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
 
 #endif /* CONFIG_RETPOLINE && CONFIG_OBJTOOL */

@@ -860,6 +919,7 @@ void __init alternative_instructions(void)
 	 * those can rewrite the retpoline thunks.
 	 */
 	apply_retpolines(__retpoline_sites, __retpoline_sites_end);
+	apply_returns(__return_sites, __return_sites_end);
 
 	/*
 	 * Then patch alternatives, such that those paravirt calls that are in

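[ Note on the addressing in apply_returns() above: .return_sites is a table of
self-relative s32 entries, each holding the distance from the entry itself to
the JMP it annotates, which keeps the table position-independent. A small
userspace sketch of the same math; the table and site names are invented for
the example. ]

#include <stdint.h>
#include <stdio.h>

static unsigned char site_a[5], site_b[5];	/* stand-in patch sites      */
static int32_t table[2];			/* the ".return_sites" analogue */

int main(void)
{
	/* each entry stores (target - &entry) */
	table[0] = (int32_t)((intptr_t)site_a - (intptr_t)&table[0]);
	table[1] = (int32_t)((intptr_t)site_b - (intptr_t)&table[1]);

	for (int32_t *s = table; s < table + 2; s++) {
		/* same recovery as apply_returns(): addr = (void *)s + *s */
		void *addr = (unsigned char *)s + *s;
		printf("entry %td -> site at %p\n", s - table, addr);
	}
	return 0;
}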
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c

@@ -253,7 +253,7 @@ int module_finalize(const Elf_Ehdr *hdr,
 {
 	const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
 		*para = NULL, *orc = NULL, *orc_ip = NULL,
-		*retpolines = NULL, *ibt_endbr = NULL;
+		*retpolines = NULL, *returns = NULL, *ibt_endbr = NULL;
 	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 
 	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {

@@ -271,6 +271,8 @@ int module_finalize(const Elf_Ehdr *hdr,
 			orc_ip = s;
 		if (!strcmp(".retpoline_sites", secstrings + s->sh_name))
 			retpolines = s;
+		if (!strcmp(".return_sites", secstrings + s->sh_name))
+			returns = s;
 		if (!strcmp(".ibt_endbr_seal", secstrings + s->sh_name))
 			ibt_endbr = s;
 	}

@@ -287,6 +289,10 @@ int module_finalize(const Elf_Ehdr *hdr,
 		void *rseg = (void *)retpolines->sh_addr;
 		apply_retpolines(rseg, rseg + retpolines->sh_size);
 	}
+	if (returns) {
+		void *rseg = (void *)returns->sh_addr;
+		apply_returns(rseg, rseg + returns->sh_size);
+	}
 	if (alt) {
 		/* patch .altinstructions */
 		void *aseg = (void *)alt->sh_addr;

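[ Note: modules get the same pass at load time: module_finalize() walks the
module's section headers by name and, when a .return_sites section exists,
hands its bounds to apply_returns(). A reduced sketch of that scan; the ELF
section header is collapsed to the two fields used, and all names here are
illustrative. ]

#include <stdio.h>
#include <string.h>

/* ELF section header reduced to the fields the scan actually uses */
struct shdr {
	const char *name;
	void *addr;
	unsigned long size;
};

static void apply_returns_stub(void *start, void *end)
{
	printf("would patch return sites in [%p, %p)\n", start, end);
}

static void finalize(struct shdr *sechdrs, int shnum)
{
	struct shdr *returns = NULL;

	for (int i = 0; i < shnum; i++)
		if (!strcmp(".return_sites", sechdrs[i].name))
			returns = &sechdrs[i];

	if (returns)	/* a module need not carry return sites */
		apply_returns_stub(returns->addr,
				   (char *)returns->addr + returns->size);
}

int main(void)
{
	static int entries[4];
	struct shdr secs[] = {
		{ ".text",         (void *)0, 0 },
		{ ".return_sites", entries,   sizeof(entries) },
	};

	finalize(secs, 2);
	return 0;
}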
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S

@@ -283,6 +283,13 @@ SECTIONS
 		*(.retpoline_sites)
 		__retpoline_sites_end = .;
 	}
+
+	. = ALIGN(8);
+	.return_sites : AT(ADDR(.return_sites) - LOAD_OFFSET) {
+		__return_sites = .;
+		*(.return_sites)
+		__return_sites_end = .;
+	}
 #endif
 
 #ifdef CONFIG_X86_KERNEL_IBT