Mirror of https://github.com/torvalds/linux.git
commit f81f8ad56f
As described in:

  77b0bf55bc ("kbuild/Makefile: Prepare for using macros in inline assembly code to work around asm() related GCC inlining bugs")
GCC's inlining heuristics are broken with common asm() patterns used in
kernel code, resulting in the effective disabling of inlining.
The workaround is to define an assembly macro and call it from the inline
assembly block. As a result, GCC considers the inline assembly block to be
a single instruction. (Which it isn't, but that's the best we can get.)
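For illustration, a minimal sketch of the pattern (the EMIT_NOTE macro and the
.discard.notes section are made up for this example; the kernel feeds its real
macros to the assembler via the kbuild change referenced above rather than via
a top-level asm()):

/* Define the bulky directives once, in an assembler macro. */
asm(".macro EMIT_NOTE line:req\n\t"
    ".pushsection .discard.notes\n\t"
    ".long \\line\n\t"
    ".popsection\n"
    ".endm\n");

/* Each call site now expands to a single macro invocation, which is all
 * GCC's inliner sees when it estimates the size of the asm() statement. */
#define EMIT_NOTE()						\
	asm volatile("EMIT_NOTE line=%c0" : : "i" (__LINE__))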
This patch increases the kernel size:
text data bss dec hex filename
18146889 10225380 2957312 31329581 1de0d2d ./vmlinux before
18147336 10226688 2957312 31331336 1de1408 ./vmlinux after (+1755)
But it enables more aggressive inlining (and probably better branch decisions).
The number of static text symbols in vmlinux is much lower:
Before: 40218
After: 40053 (-165)
The assembly code gets harder to read due to the extra macro layer.
[ mingo: Rewrote the changelog. ]
Tested-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Nadav Amit <namit@vmware.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20181003213100.189959-7-namit@vmware.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
100 lines | 2.1 KiB | C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BUG_H
#define _ASM_X86_BUG_H

#include <linux/stringify.h>

#ifndef __ASSEMBLY__

/*
 * Despite that some emulators terminate on UD2, we use it for WARN().
 *
 * Since various instruction decoders/specs disagree on the encoding of
 * UD0/UD1.
 */

#define ASM_UD0		".byte 0x0f, 0xff" /* + ModRM (for Intel) */
#define ASM_UD1		".byte 0x0f, 0xb9" /* + ModRM */
#define ASM_UD2		".byte 0x0f, 0x0b"

#define INSN_UD0	0xff0f
#define INSN_UD2	0x0b0f

#define LEN_UD2		2

#define _BUG_FLAGS(ins, flags)						\
do {									\
	asm volatile("ASM_BUG ins=\"" ins "\" file=%c0 line=%c1 "	\
		     "flags=%c2 size=%c3"				\
		     : : "i" (__FILE__), "i" (__LINE__),		\
			 "i" (flags),					\
			 "i" (sizeof(struct bug_entry)));		\
} while (0)

#define HAVE_ARCH_BUG
#define BUG()							\
do {								\
	_BUG_FLAGS(ASM_UD2, 0);					\
	unreachable();						\
} while (0)

#define __WARN_FLAGS(flags)					\
do {								\
	_BUG_FLAGS(ASM_UD2, BUGFLAG_WARNING|(flags));		\
	annotate_reachable();					\
} while (0)

#include <asm-generic/bug.h>

#else /* __ASSEMBLY__ */

#ifdef CONFIG_GENERIC_BUG

#ifdef CONFIG_X86_32
.macro __BUG_REL val:req
	.long \val
.endm
#else
.macro __BUG_REL val:req
	.long \val - 2b
.endm
#endif

#ifdef CONFIG_DEBUG_BUGVERBOSE

.macro ASM_BUG ins:req file:req line:req flags:req size:req
1:	\ins
	.pushsection __bug_table,"aw"
2:	__BUG_REL val=1b	# bug_entry::bug_addr
	__BUG_REL val=\file	# bug_entry::file
	.word \line		# bug_entry::line
	.word \flags		# bug_entry::flags
	.org 2b+\size
	.popsection
.endm

#else /* !CONFIG_DEBUG_BUGVERBOSE */

.macro ASM_BUG ins:req file:req line:req flags:req size:req
1:	\ins
	.pushsection __bug_table,"aw"
2:	__BUG_REL val=1b	# bug_entry::bug_addr
	.word \flags		# bug_entry::flags
	.org 2b+\size
	.popsection
.endm

#endif /* CONFIG_DEBUG_BUGVERBOSE */

#else /* CONFIG_GENERIC_BUG */

.macro ASM_BUG ins:req file:req line:req flags:req size:req
	\ins
.endm

#endif /* CONFIG_GENERIC_BUG */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_BUG_H */
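For context, a hedged usage sketch of how the C side of this header is consumed
(the function below is hypothetical; BUG() and WARN_ON() ultimately come from
<asm-generic/bug.h>, included above when __ASSEMBLY__ is not defined, and the
"ASM_BUG ins=..." text emitted by _BUG_FLAGS() is expanded by the assembler
using the .macro definitions in the __ASSEMBLY__ branch):

#include <linux/bug.h>
#include <linux/errno.h>

/* Hypothetical caller, for illustration only. */
static int check_ctx(const void *ctx)
{
	if (WARN_ON(!ctx))		/* UD2 tagged BUGFLAG_WARNING; execution continues */
		return -EINVAL;

	if ((unsigned long)ctx & 0x3)
		BUG();			/* UD2 plus a __bug_table entry; never returns */

	return 0;
}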