From f051f697955049c7cf10a635ab8149aa619243b2 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Mon, 6 Apr 2020 15:55:06 +0200
Subject: [PATCH] x86/nmi: Protect NMI entry against instrumentation

Mark all functions in the fragile code parts noinstr or force inlining so
they can't be instrumented. Also make the hardware latency tracer invocation
explicit outside of the non-instrumentable section.

Signed-off-by: Thomas Gleixner
Reviewed-by: Alexandre Chartre
Acked-by: Peter Zijlstra
Acked-by: Andy Lutomirski
Link: https://lkml.kernel.org/r/20200505135314.716186134@linutronix.de
---
 arch/x86/include/asm/desc.h  |  8 ++++----
 arch/x86/kernel/cpu/common.c |  6 ++----
 arch/x86/kernel/nmi.c        | 15 +++++++++------
 3 files changed, 15 insertions(+), 14 deletions(-)

diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 085a2dd312b4..d6c3d346c63a 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -214,7 +214,7 @@ static inline void native_load_gdt(const struct desc_ptr *dtr)
         asm volatile("lgdt %0"::"m" (*dtr));
 }
 
-static inline void native_load_idt(const struct desc_ptr *dtr)
+static __always_inline void native_load_idt(const struct desc_ptr *dtr)
 {
         asm volatile("lidt %0"::"m" (*dtr));
 }
@@ -392,7 +392,7 @@ extern unsigned long system_vectors[];
 
 #ifdef CONFIG_X86_64
 DECLARE_PER_CPU(u32, debug_idt_ctr);
-static inline bool is_debug_idt_enabled(void)
+static __always_inline bool is_debug_idt_enabled(void)
 {
         if (this_cpu_read(debug_idt_ctr))
                 return true;
@@ -400,7 +400,7 @@ static inline bool is_debug_idt_enabled(void)
         return false;
 }
 
-static inline void load_debug_idt(void)
+static __always_inline void load_debug_idt(void)
 {
         load_idt((const struct desc_ptr *)&debug_idt_descr);
 }
@@ -422,7 +422,7 @@ static inline void load_debug_idt(void)
  * that doesn't need to disable interrupts, as nothing should be
  * bothering the CPU then.
  */
-static inline void load_current_idt(void)
+static __always_inline void load_current_idt(void)
 {
         if (is_debug_idt_enabled())
                 load_debug_idt();
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 8be042df12c3..f4645f9ff9cb 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1709,21 +1709,19 @@ void syscall_init(void)
 DEFINE_PER_CPU(int, debug_stack_usage);
 DEFINE_PER_CPU(u32, debug_idt_ctr);
 
-void debug_stack_set_zero(void)
+noinstr void debug_stack_set_zero(void)
 {
         this_cpu_inc(debug_idt_ctr);
         load_current_idt();
 }
-NOKPROBE_SYMBOL(debug_stack_set_zero);
 
-void debug_stack_reset(void)
+noinstr void debug_stack_reset(void)
 {
         if (WARN_ON(!this_cpu_read(debug_idt_ctr)))
                 return;
         if (this_cpu_dec_return(debug_idt_ctr) == 0)
                 load_current_idt();
 }
-NOKPROBE_SYMBOL(debug_stack_reset);
 
 #else /* CONFIG_X86_64 */
 
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 3b05cc802abb..3052c78f03aa 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -303,7 +303,7 @@ NOKPROBE_SYMBOL(unknown_nmi_error);
 static DEFINE_PER_CPU(bool, swallow_nmi);
 static DEFINE_PER_CPU(unsigned long, last_nmi_rip);
 
-static void default_do_nmi(struct pt_regs *regs)
+static noinstr void default_do_nmi(struct pt_regs *regs)
 {
         unsigned char reason = 0;
         int handled;
@@ -329,6 +329,8 @@ static void default_do_nmi(struct pt_regs *regs)
 
         __this_cpu_write(last_nmi_rip, regs->ip);
 
+        instrumentation_begin();
+
         handled = nmi_handle(NMI_LOCAL, regs);
         __this_cpu_add(nmi_stats.normal, handled);
         if (handled) {
@@ -342,7 +344,7 @@ static void default_do_nmi(struct pt_regs *regs)
                  */
                 if (handled > 1)
                         __this_cpu_write(swallow_nmi, true);
-                return;
+                goto out;
         }
 
         /*
@@ -374,7 +376,7 @@ static void default_do_nmi(struct pt_regs *regs)
 #endif
                 __this_cpu_add(nmi_stats.external, 1);
                 raw_spin_unlock(&nmi_reason_lock);
-                return;
+                goto out;
         }
         raw_spin_unlock(&nmi_reason_lock);
 
@@ -412,8 +414,10 @@ static void default_do_nmi(struct pt_regs *regs)
                 __this_cpu_add(nmi_stats.swallow, 1);
         else
                 unknown_nmi_error(reason, regs);
+
+out:
+        instrumentation_end();
 }
-NOKPROBE_SYMBOL(default_do_nmi);
 
 /*
  * NMIs can page fault or hit breakpoints which will cause it to lose
@@ -485,7 +489,7 @@ static DEFINE_PER_CPU(unsigned long, nmi_cr2);
  */
 static DEFINE_PER_CPU(int, update_debug_stack);
 
-static bool notrace is_debug_stack(unsigned long addr)
+static noinstr bool is_debug_stack(unsigned long addr)
 {
         struct cea_exception_stacks *cs = __this_cpu_read(cea_exception_stacks);
         unsigned long top = CEA_ESTACK_TOP(cs, DB);
@@ -500,7 +504,6 @@ static bool notrace is_debug_stack(unsigned long addr)
          */
         return addr >= bot && addr < top;
 }
-NOKPROBE_SYMBOL(is_debug_stack);
 #endif
 
 DEFINE_IDTENTRY_NMI(exc_nmi)
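
Not part of the patch itself: for readers unfamiliar with the annotations used
above, a minimal sketch of the pattern the diff applies. noinstr,
instrumentation_begin() and instrumentation_end() are the kernel facilities
seen in the hunks; the function and helper names below are hypothetical and
assume a kernel build context (required headers not shown).

/* Hypothetical instrumentable helper, standing in for nmi_handle() above. */
static void handle_event(void)
{
}

/* Hypothetical entry-style function following the shape of default_do_nmi(). */
noinstr void example_entry(void)
{
        /*
         * Fragile entry region: noinstr keeps tracing, kprobes and other
         * compiler instrumentation out of this function itself.
         */

        instrumentation_begin();
        /* Calls into normally instrumented code are permitted again here. */
        handle_event();
        instrumentation_end();

        /* Back in the non-instrumentable region before returning. */
}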