x86/entry: Fixup bad_iret vs noinstr

vmlinux.o: warning: objtool: fixup_bad_iret()+0x8e: call to memcpy() leaves .noinstr.text section

Worse, with KASAN enabled there is no telling what memcpy() actually is.
Force the use of __memcpy(), which is our assembly implementation.

Reported-by: Marco Elver <elver@google.com>
Suggested-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Marco Elver <elver@google.com>
Link: https://lkml.kernel.org/r/20200618144801.760070502@infradead.org
Author: Peter Zijlstra <peterz@infradead.org>
Date:   2020-06-17 18:21:16 +02:00
Commit: e3a9e681ad (parent 734d099ba6)
2 changed files with 7 additions and 3 deletions
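
For context on the constraint being fixed: code in .noinstr.text must only
call other non-instrumented code, because instrumentation (KASAN, tracing)
is not safe at these low-level entry points. A minimal sketch of the
pattern, with a hypothetical helper name; only noinstr and __memcpy() are
real kernel identifiers here:

#include <linux/compiler_types.h>	/* noinstr */
#include <linux/string.h>		/* __memcpy() on x86-64 */

/*
 * Hypothetical example: a function placed in .noinstr.text. With KASAN,
 * a plain memcpy() call may resolve to an instrumented interceptor that
 * lives outside .noinstr.text, which objtool flags. The assembly
 * __memcpy() carries no instrumentation, so it is safe to call here.
 */
noinstr void copy_in_noinstr(void *dst, const void *src, size_t len)
{
	__memcpy(dst, src, len);
}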

arch/x86/kernel/traps.c

@@ -690,13 +690,13 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
 		(struct bad_iret_stack *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 
 	/* Copy the IRET target to the temporary storage. */
-	memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);
+	__memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);
 
 	/* Copy the remainder of the stack from the current stack. */
-	memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));
+	__memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));
 
 	/* Update the entry stack */
-	memcpy(new_stack, &tmp, sizeof(tmp));
+	__memcpy(new_stack, &tmp, sizeof(tmp));
 
 	BUG_ON(!user_mode(&new_stack->regs));
 	return new_stack;
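
As an aside on the 5*8 above: the first copy moves the five 8-byte words of
the hardware IRET frame. A sketch of that layout (struct name hypothetical,
field order per the x86-64 exception frame):

/*
 * The 64-bit CPU pushes these five quadwords on an exception;
 * 5*8 bytes copies exactly this frame, starting at regs.ip.
 */
struct iret_frame_sketch {
	unsigned long ip;	/* RIP    */
	unsigned long cs;	/* CS     */
	unsigned long flags;	/* RFLAGS */
	unsigned long sp;	/* RSP    */
	unsigned long ss;	/* SS     */
};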

arch/x86/lib/memcpy_64.S

@@ -8,6 +8,8 @@
 #include <asm/alternative-asm.h>
 #include <asm/export.h>
 
+.pushsection .noinstr.text, "ax"
+
 /*
  * We build a jump to memcpy_orig by default which gets NOPped out on
  * the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
@@ -184,6 +186,8 @@ SYM_FUNC_START_LOCAL(memcpy_orig)
 	retq
SYM_FUNC_END(memcpy_orig)
 
+.popsection
+
 #ifndef CONFIG_UML
 
 MCSAFE_TEST_CTL
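
The memcpy_64.S change brackets the file's memcpy implementations with a
section push/pop so the assembly lands in .noinstr.text. Schematically (a
sketch, not the file's actual contents; example_func is hypothetical):

.pushsection .noinstr.text, "ax"	/* "ax": allocatable, executable */

SYM_FUNC_START(example_func)
	ret
SYM_FUNC_END(example_func)

.popsection				/* restore the previously active section */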