Merge branch 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 pti fixes from Thomas Gleixner:
 "A small set of fixes for the meltdown/spectre mitigations:

   - Make kprobes aware of retpolines to prevent probes in the retpoline
     thunks.

   - Make the machine check exception speculation protected. MCE used to
     issue an indirect call directly from the ASM entry code. Convert
     that to a direct call into a C-function and issue the indirect call
     from there so the compiler can add the retpoline protection.

   - Make the vmexit_fill_RSB() assembly less stupid.

   - Fix a typo in the PTI documentation."

* 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/retpoline: Optimize inline assembler for vmexit_fill_RSB
  x86/pti: Document fix wrong index
  kprobes/x86: Disable optimizing on the function jumps to indirect thunk
  kprobes/x86: Blacklist indirect thunk functions for kprobes
  retpoline: Introduce start/end markers of indirect thunk
  x86/mce: Make machine check speculation protected
commit 5515114211
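To make the MCE change concrete: the entry code used to issue `call *machine_check_vector(%rip)` straight from assembly, an indirect call the compiler never sees and therefore cannot retpoline. The merge routes it through a C wrapper instead (see the entry_64.S and mce.c hunks below). A minimal user-space analogue of that pattern follows; the names mirror the hunks, but the bodies are stand-ins, not kernel code:

/*
 * User-space sketch of the do_mce() pattern.  The entry code now makes
 * a DIRECT call to do_mce(); the INDIRECT call through
 * machine_check_vector happens in C, where a retpoline-enabled
 * compiler emits a safe thunk for it.
 */
#include <stdio.h>

struct pt_regs;				/* opaque stand-in for the kernel type */

static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
	printf("unexpected machine check, error_code=%ld\n", error_code);
}

static void (*machine_check_vector)(struct pt_regs *, long) =
						unexpected_machine_check;

static void do_mce(struct pt_regs *regs, long error_code)
{
	machine_check_vector(regs, error_code);	/* compiler-visible indirect call */
}

int main(void)
{
	do_mce(NULL, 0);	/* stands in for the asm entry stub's direct call */
	return 0;
}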
--- a/Documentation/x86/pti.txt
+++ b/Documentation/x86/pti.txt
@@ -78,7 +78,7 @@ this protection comes at a cost:
      non-PTI SYSCALL entry code, so requires mapping fewer
      things into the userspace page tables. The downside is
      that stacks must be switched at entry time.
- d. Global pages are disabled for all kernel structures not
+ c. Global pages are disabled for all kernel structures not
     mapped into both kernel and userspace page tables. This
     feature of the MMU allows different processes to share TLB
     entries mapping the kernel. Losing the feature means more
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1264,7 +1264,7 @@ idtentry async_page_fault do_async_page_fault has_error_code=1
 #endif
 
 #ifdef CONFIG_X86_MCE
-idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(%rip)
+idtentry machine_check do_mce has_error_code=0 paranoid=1
 #endif
 
 /*
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -194,6 +194,9 @@ enum spectre_v2_mitigation {
 	SPECTRE_V2_IBRS,
 };
 
+extern char __indirect_thunk_start[];
+extern char __indirect_thunk_end[];
+
 /*
  * On VMEXIT we must ensure that no RSB predictions learned in the guest
  * can be followed in the host, by overwriting the RSB completely. Both
@@ -203,16 +206,17 @@ enum spectre_v2_mitigation {
 static inline void vmexit_fill_RSB(void)
 {
 #ifdef CONFIG_RETPOLINE
-	unsigned long loops = RSB_CLEAR_LOOPS / 2;
+	unsigned long loops;
 
 	asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
 		      ALTERNATIVE("jmp 910f",
 				  __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
 				  X86_FEATURE_RETPOLINE)
 		      "910:"
-		      : "=&r" (loops), ASM_CALL_CONSTRAINT
-		      : "r" (loops) : "memory" );
+		      : "=r" (loops), ASM_CALL_CONSTRAINT
+		      : : "memory" );
 #endif
 }
 
 #endif /* __ASSEMBLY__ */
 #endif /* __NOSPEC_BRANCH_H__ */
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -88,6 +88,7 @@ dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long);
 #ifdef CONFIG_X86_32
 dotraplinkage void do_iret_error(struct pt_regs *, long);
 #endif
+dotraplinkage void do_mce(struct pt_regs *, long);
 
 static inline int get_si_code(unsigned long condition)
 {
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1785,6 +1785,11 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
 void (*machine_check_vector)(struct pt_regs *, long error_code) =
 						unexpected_machine_check;
 
+dotraplinkage void do_mce(struct pt_regs *regs, long error_code)
+{
+	machine_check_vector(regs, error_code);
+}
+
 /*
  * Called for each booted CPU to set up machine checks.
  * Must be called with preempt off:
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -40,6 +40,7 @@
 #include <asm/debugreg.h>
 #include <asm/set_memory.h>
 #include <asm/sections.h>
+#include <asm/nospec-branch.h>
 
 #include "common.h"
 
@@ -203,7 +204,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
 }
 
 /* Check whether insn is indirect jump */
-static int insn_is_indirect_jump(struct insn *insn)
+static int __insn_is_indirect_jump(struct insn *insn)
 {
 	return ((insn->opcode.bytes[0] == 0xff &&
 		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
@@ -237,6 +238,26 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
 	return (start <= target && target <= start + len);
 }
 
+static int insn_is_indirect_jump(struct insn *insn)
+{
+	int ret = __insn_is_indirect_jump(insn);
+
+#ifdef CONFIG_RETPOLINE
+	/*
+	 * Jump to x86_indirect_thunk_* is treated as an indirect jump.
+	 * Note that even with CONFIG_RETPOLINE=y, the kernel compiled with
+	 * older gcc may use indirect jump. So we add this check instead of
+	 * replace indirect-jump check.
+	 */
+	if (!ret)
+		ret = insn_jump_into_range(insn,
+				(unsigned long)__indirect_thunk_start,
+				(unsigned long)__indirect_thunk_end -
+				(unsigned long)__indirect_thunk_start);
+#endif
+	return ret;
+}
+
 /* Decode whole function to ensure any instructions don't jump into target */
 static int can_optimize(unsigned long paddr)
 {
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -124,6 +124,12 @@ SECTIONS
 		ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
 #endif
 
+#ifdef CONFIG_RETPOLINE
+		__indirect_thunk_start = .;
+		*(.text.__x86.indirect_thunk)
+		__indirect_thunk_end = .;
+#endif
+
 		/* End of text section */
 		_etext = .;
 	} :text = 0x9090
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -9,7 +9,7 @@
 #include <asm/nospec-branch.h>
 
 .macro THUNK reg
-	.section .text.__x86.indirect_thunk.\reg
+	.section .text.__x86.indirect_thunk
 
 ENTRY(__x86_indirect_thunk_\reg)
 	CFI_STARTPROC
@@ -25,7 +25,8 @@ ENDPROC(__x86_indirect_thunk_\reg)
 * than one per register with the correct names. So we do it
 * the simple and nasty way...
 */
-#define EXPORT_THUNK(reg) EXPORT_SYMBOL(__x86_indirect_thunk_ ## reg)
+#define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)
+#define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
 #define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg)
 
 GENERATE_THUNK(_ASM_AX)