Merge tag 'x86_core_for_v5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 updates from Borislav Petkov:

 - Turn the stack canary into a normal __percpu variable on 32-bit which
   gets rid of the LAZY_GS stuff and a lot of code.

 - Add an insn_decode() API which all users of the instruction decoder
   should preferably use. Its goal is to keep the details of the
   instruction decoder away from its users and simplify and streamline
   how one decodes insns in the kernel. Convert its users to it.
   (A usage sketch follows the commit list below.)

 - kprobes improvements and fixes

 - Set the maximum DIE per package variable on Hygon

 - Rip out the dynamic NOP selection and simplify all the machinery
   around selecting NOPs. Use the simplified NOPs in objtool now too.

 - Add Xeon Sapphire Rapids to the list of CPUs that support PPIN.

 - Simplify the retpolines by folding the entire thing into an
   alternative now that objtool can handle alternatives with stack
   ops. Then, have objtool rewrite the call to the retpoline with the
   alternative which then will get patched at boot time.

 - Document Intel uarch per models in intel-family.h.

 - Make Sub-NUMA Clustering topology the default and Cluster-on-Die the
   exception on Intel.

* tag 'x86_core_for_v5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (53 commits)
  x86, sched: Treat Intel SNC topology as default, COD as exception
  x86/cpu: Comment Skylake server stepping too
  x86/cpu: Resort and comment Intel models
  objtool/x86: Rewrite retpoline thunk calls
  objtool: Skip magical retpoline .altinstr_replacement
  objtool: Cache instruction relocs
  objtool: Keep track of retpoline call sites
  objtool: Add elf_create_undef_symbol()
  objtool: Extract elf_symbol_add()
  objtool: Extract elf_strtab_concat()
  objtool: Create reloc sections implicitly
  objtool: Add elf_create_reloc() helper
  objtool: Rework the elf_rebuild_reloc_section() logic
  objtool: Fix static_call list generation
  objtool: Handle per arch retpoline naming
  objtool: Correctly handle retpoline thunk calls
  x86/retpoline: Simplify retpolines
  x86/alternatives: Optimize optimize_nops()
  x86: Add insn_decode_kernel()
  x86/kprobes: Move 'inline' to the beginning of the kprobe_is_ss() declaration
  ...
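To illustrate the insn_decode() conversion pattern this series applies throughout the tree: callers used to chain insn_init() and individual insn_get_*() steps and then test the decoder's "got" flags by hand, whereas the new API is a single call whose return value reports failure. A minimal, hypothetical sketch of a new-style caller (only the insn_decode() signature and INSN_MODE_64 come from this series; the helper itself is made up):

	#include <asm/insn.h>

	/* Decode one instruction from 'buf' and return its length or an error. */
	static int example_decode_len(const unsigned char *buf, int buf_len)
	{
		struct insn insn;
		int ret;

		/* Replaces insn_init() + insn_get_length() + insn.*.got checks. */
		ret = insn_decode(&insn, buf, buf_len, INSN_MODE_64);
		if (ret < 0)
			return ret;	/* malformed or truncated instruction */

		return insn.length;	/* decode succeeded; length is valid */
	}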
@@ -361,10 +361,6 @@ config X86_64_SMP
 	def_bool y
 	depends on X86_64 && SMP
 
-config X86_32_LAZY_GS
-	def_bool y
-	depends on X86_32 && !STACKPROTECTOR
-
 config ARCH_SUPPORTS_UPROBES
 	def_bool y
 
@@ -387,7 +383,8 @@ config CC_HAS_SANE_STACKPROTECTOR
 	default $(success,$(srctree)/scripts/gcc-x86_32-has-stack-protector.sh $(CC))
 	help
 	  We have to make sure stack protector is unconditionally disabled if
-	  the compiler produces broken code.
+	  the compiler produces broken code or if it does not let us control
+	  the segment on 32-bit kernels.
 
 menu "Processor type and features"
 
@@ -80,6 +80,14 @@ ifeq ($(CONFIG_X86_32),y)
 
         # temporary until string.h is fixed
         KBUILD_CFLAGS += -ffreestanding
 
+	ifeq ($(CONFIG_STACKPROTECTOR),y)
+		ifeq ($(CONFIG_SMP),y)
+			KBUILD_CFLAGS += -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard
+		else
+			KBUILD_CFLAGS += -mstack-protector-guard=global
+		endif
+	endif
+
 else
         BITS := 64
         UTS_MACHINE := x86_64
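The compiler flags added above are what make the 32-bit canary a normal percpu variable: -mstack-protector-guard-reg=fs plus -mstack-protector-guard-symbol=__stack_chk_guard point GCC's canary accesses at the %fs-based percpu symbol on SMP, and -mstack-protector-guard=global uses a plain global on UP. The same mechanism can be observed in userspace; a minimal sketch assuming a GCC 8.1+ x86 toolchain (the file name and guard value are made up):

	/* canary-demo.c: compile with
	 *   gcc -m32 -O2 -fstack-protector-all -mstack-protector-guard=global -S canary-demo.c
	 * and the emitted prologue/epilogue reference the named symbol
	 * __stack_chk_guard instead of the default TLS slot %gs:20.
	 */
	#include <string.h>

	unsigned long __stack_chk_guard = 0xdeadbeef;	/* arbitrary demo value */

	void copy_name(char *dst, const char *src)
	{
		char tmp[64];

		/* tmp's frame gets a canary; it is verified in the epilogue. */
		strncpy(tmp, src, sizeof(tmp) - 1);
		tmp[sizeof(tmp) - 1] = '\0';
		strcpy(dst, tmp);
	}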
@@ -78,16 +78,15 @@ static inline void sev_es_wr_ghcb_msr(u64 val)
 static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
 {
 	char buffer[MAX_INSN_SIZE];
-	enum es_result ret;
+	int ret;
 
 	memcpy(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
 
-	insn_init(&ctxt->insn, buffer, MAX_INSN_SIZE, 1);
-	insn_get_length(&ctxt->insn);
+	ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
+	if (ret < 0)
+		return ES_DECODE_FAILED;
 
-	ret = ctxt->insn.immediate.got ? ES_OK : ES_DECODE_FAILED;
-
-	return ret;
+	return ES_OK;
 }
 
 static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
@@ -20,7 +20,7 @@
  * 1C(%esp) - %ds
  * 20(%esp) - %es
  * 24(%esp) - %fs
- * 28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
+ * 28(%esp) - unused -- was %gs on old stackprotector kernels
  * 2C(%esp) - orig_eax
  * 30(%esp) - %eip
  * 34(%esp) - %cs
@@ -53,83 +53,6 @@
 
 #define PTI_SWITCH_MASK         (1 << PAGE_SHIFT)
 
-/*
- * User gs save/restore
- *
- * %gs is used for userland TLS and kernel only uses it for stack
- * canary which is required to be at %gs:20 by gcc.  Read the comment
- * at the top of stackprotector.h for more info.
- *
- * Local labels 98 and 99 are used.
- */
-#ifdef CONFIG_X86_32_LAZY_GS
-
- /* unfortunately push/pop can't be no-op */
-.macro PUSH_GS
-	pushl	$0
-.endm
-.macro POP_GS pop=0
-	addl	$(4 + \pop), %esp
-.endm
-.macro POP_GS_EX
-.endm
-
- /* all the rest are no-op */
-.macro PTGS_TO_GS
-.endm
-.macro PTGS_TO_GS_EX
-.endm
-.macro GS_TO_REG reg
-.endm
-.macro REG_TO_PTGS reg
-.endm
-.macro SET_KERNEL_GS reg
-.endm
-
-#else	/* CONFIG_X86_32_LAZY_GS */
-
-.macro PUSH_GS
-	pushl	%gs
-.endm
-
-.macro POP_GS pop=0
-98:	popl	%gs
-  .if \pop <> 0
-	add	$\pop, %esp
-  .endif
-.endm
-.macro POP_GS_EX
-.pushsection .fixup, "ax"
-99:	movl	$0, (%esp)
-	jmp	98b
-.popsection
-	_ASM_EXTABLE(98b, 99b)
-.endm
-
-.macro PTGS_TO_GS
-98:	mov	PT_GS(%esp), %gs
-.endm
-.macro PTGS_TO_GS_EX
-.pushsection .fixup, "ax"
-99:	movl	$0, PT_GS(%esp)
-	jmp	98b
-.popsection
-	_ASM_EXTABLE(98b, 99b)
-.endm
-
-.macro GS_TO_REG reg
-	movl	%gs, \reg
-.endm
-.macro REG_TO_PTGS reg
-	movl	\reg, PT_GS(%esp)
-.endm
-.macro SET_KERNEL_GS reg
-	movl	$(__KERNEL_STACK_CANARY), \reg
-	movl	\reg, %gs
-.endm
-
-#endif /* CONFIG_X86_32_LAZY_GS */
-
 /* Unconditionally switch to user cr3 */
 .macro SWITCH_TO_USER_CR3 scratch_reg:req
 	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
@@ -282,7 +205,7 @@
 .macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 unwind_espfix=0
 	cld
 .if \skip_gs == 0
-	PUSH_GS
+	pushl	$0
 .endif
 	pushl	%fs
 
@@ -307,9 +230,6 @@
 	movl	$(__USER_DS), %edx
 	movl	%edx, %ds
 	movl	%edx, %es
-.if \skip_gs == 0
-	SET_KERNEL_GS %edx
-.endif
 	/* Switch to kernel stack if necessary */
 .if \switch_stacks > 0
 	SWITCH_TO_KERNEL_STACK
@@ -348,7 +268,7 @@
 1:	popl	%ds
 2:	popl	%es
 3:	popl	%fs
-	POP_GS	\pop
+	addl	$(4 + \pop), %esp	/* pop the unused "gs" slot */
 	IRET_FRAME
 .pushsection .fixup, "ax"
 4:	movl	$0, (%esp)
@@ -361,7 +281,6 @@
 	_ASM_EXTABLE(1b, 4b)
 	_ASM_EXTABLE(2b, 5b)
 	_ASM_EXTABLE(3b, 6b)
-	POP_GS_EX
 .endm
 
 .macro RESTORE_ALL_NMI cr3_reg:req pop=0
@@ -779,7 +698,7 @@ SYM_CODE_START(__switch_to_asm)
 
 #ifdef CONFIG_STACKPROTECTOR
 	movl	TASK_stack_canary(%edx), %ebx
-	movl	%ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
+	movl	%ebx, PER_CPU_VAR(__stack_chk_guard)
 #endif
 
 #ifdef CONFIG_RETPOLINE
@@ -976,7 +895,6 @@ SYM_FUNC_START(entry_SYSENTER_32)
 	movl	PT_EIP(%esp), %edx	/* pt_regs->ip */
 	movl	PT_OLDESP(%esp), %ecx	/* pt_regs->sp */
 1:	mov	PT_FS(%esp), %fs
-	PTGS_TO_GS
 
 	popl	%ebx			/* pt_regs->bx */
 	addl	$2*4, %esp		/* skip pt_regs->cx and pt_regs->dx */
@@ -1012,7 +930,6 @@ SYM_FUNC_START(entry_SYSENTER_32)
 	jmp	1b
 .popsection
 	_ASM_EXTABLE(1b, 2b)
-	PTGS_TO_GS_EX
 
 .Lsysenter_fix_flags:
 	pushl	$X86_EFLAGS_FIXED
@@ -1154,11 +1071,7 @@ SYM_CODE_START_LOCAL_NOALIGN(handle_exception)
 	SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
 	ENCODE_FRAME_POINTER
 
-	/* fixup %gs */
-	GS_TO_REG %ecx
 	movl	PT_GS(%esp), %edi	# get the function address
-	REG_TO_PTGS %ecx
-	SET_KERNEL_GS %ecx
 
 	/* fixup orig %eax */
 	movl	PT_ORIG_EAX(%esp), %edx	# get the error code
@@ -1353,14 +1353,13 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
 		is_64bit = kernel_ip(to) || any_64bit_mode(regs);
 #endif
 		insn_init(&insn, kaddr, size, is_64bit);
-		insn_get_length(&insn);
+
 		/*
-		 * Make sure there was not a problem decoding the
-		 * instruction and getting the length.  This is
-		 * doubly important because we have an infinite
-		 * loop if insn.length=0.
+		 * Make sure there was not a problem decoding the instruction.
+		 * This is doubly important because we have an infinite loop if
+		 * insn.length=0.
 		 */
-		if (!insn.length)
+		if (insn_get_length(&insn))
 			break;
 
 		to += insn.length;
@@ -1224,8 +1224,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
 	is64 = kernel_ip((unsigned long)addr) || any_64bit_mode(current_pt_regs());
 #endif
 	insn_init(&insn, addr, bytes_read, is64);
-	insn_get_opcode(&insn);
-	if (!insn.opcode.got)
+	if (insn_get_opcode(&insn))
 		return X86_BR_ABORT;
 
 	switch (insn.opcode.bytes[0]) {
@@ -1262,8 +1261,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
 		ret = X86_BR_INT;
 		break;
 	case 0xe8: /* call near rel */
-		insn_get_immediate(&insn);
-		if (insn.immediate1.value == 0) {
+		if (insn_get_immediate(&insn) || insn.immediate1.value == 0) {
 			/* zero length call */
 			ret = X86_BR_ZERO_CALL;
 			break;
@@ -1279,7 +1277,9 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
 		ret = X86_BR_JMP;
 		break;
 	case 0xff: /* call near absolute, call far absolute ind */
-		insn_get_modrm(&insn);
+		if (insn_get_modrm(&insn))
+			return X86_BR_ABORT;
+
 		ext = (insn.modrm.bytes[0] >> 3) & 0x7;
 		switch (ext) {
 		case 2: /* near ind call */
@@ -65,7 +65,6 @@ struct alt_instr {
 	u16 cpuid;		/* cpuid bit set for replacement */
 	u8  instrlen;		/* length of original instruction */
 	u8  replacementlen;	/* length of new instruction */
-	u8  padlen;		/* length of build-time padding */
 } __packed;
 
 /*
@@ -104,7 +103,6 @@ static inline int alternatives_text_reserved(void *start, void *end)
 
 #define alt_end_marker	"663"
 #define alt_slen	"662b-661b"
-#define alt_pad_len	alt_end_marker"b-662b"
 #define alt_total_slen	alt_end_marker"b-661b"
 #define alt_rlen(num)	e_replacement(num)"f-"b_replacement(num)"f"
 
@@ -151,8 +149,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
 	" .long " b_replacement(num)"f - .\n"		/* new instruction */	\
 	" .word " __stringify(feature) "\n"		/* feature bit     */	\
 	" .byte " alt_total_slen "\n"			/* source len      */	\
-	" .byte " alt_rlen(num) "\n"			/* replacement len */	\
-	" .byte " alt_pad_len "\n"			/* pad len */
+	" .byte " alt_rlen(num) "\n"			/* replacement len */
 
 #define ALTINSTR_REPLACEMENT(newinstr, num)		/* replacement */	\
 	"# ALT: replacement " #num "\n"						\
@@ -224,9 +221,6 @@ static inline int alternatives_text_reserved(void *start, void *end)
  * Peculiarities:
  * No memory clobber here.
  * Argument numbers start with 1.
- * Best is to use constraints that are fixed size (like (%1) ... "r")
- * If you use variable sized constraints like "m" or "g" in the
- * replacement make sure to pad to the worst case length.
  * Leaving an unused argument 0 to keep API compatibility.
  */
 #define alternative_input(oldinstr, newinstr, feature, input...)	\
@@ -315,13 +309,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
  * enough information for the alternatives patching code to patch an
  * instruction. See apply_alternatives().
  */
-.macro altinstruction_entry orig alt feature orig_len alt_len pad_len
+.macro altinstruction_entry orig alt feature orig_len alt_len
 	.long \orig - .
 	.long \alt - .
 	.word \feature
 	.byte \orig_len
 	.byte \alt_len
-	.byte \pad_len
 .endm
 
 /*
@@ -338,7 +331,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
 142:
 
 	.pushsection .altinstructions,"a"
-	altinstruction_entry 140b,143f,\feature,142b-140b,144f-143f,142b-141b
+	altinstruction_entry 140b,143f,\feature,142b-140b,144f-143f
 	.popsection
 
 	.pushsection .altinstr_replacement,"ax"
@@ -375,8 +368,8 @@ static inline int alternatives_text_reserved(void *start, void *end)
 142:
 
 	.pushsection .altinstructions,"a"
-	altinstruction_entry 140b,143f,\feature1,142b-140b,144f-143f,142b-141b
-	altinstruction_entry 140b,144f,\feature2,142b-140b,145f-144f,142b-141b
+	altinstruction_entry 140b,143f,\feature1,142b-140b,144f-143f
+	altinstruction_entry 140b,144f,\feature2,142b-140b,145f-144f
 	.popsection
 
 	.pushsection .altinstr_replacement,"ax"
@@ -19,18 +19,19 @@ extern void cmpxchg8b_emu(void);
 
 #ifdef CONFIG_RETPOLINE
 
-#define DECL_INDIRECT_THUNK(reg) \
-	extern asmlinkage void __x86_indirect_thunk_ ## reg (void);
-
-#define DECL_RETPOLINE(reg) \
-	extern asmlinkage void __x86_retpoline_ ## reg (void);
-
 #undef GEN
-#define GEN(reg) DECL_INDIRECT_THUNK(reg)
+#define GEN(reg) \
+	extern asmlinkage void __x86_indirect_thunk_ ## reg (void);
 #include <asm/GEN-for-each-reg.h>
 
 #undef GEN
-#define GEN(reg) DECL_RETPOLINE(reg)
+#define GEN(reg) \
+	extern asmlinkage void __x86_indirect_alt_call_ ## reg (void);
+#include <asm/GEN-for-each-reg.h>
+
+#undef GEN
+#define GEN(reg) \
+	extern asmlinkage void __x86_indirect_alt_jmp_ ## reg (void);
 #include <asm/GEN-for-each-reg.h>
 
 #endif /* CONFIG_RETPOLINE */
@@ -84,7 +84,7 @@
 
 /* CPU types for specific tunings: */
 #define X86_FEATURE_K8			( 3*32+ 4) /* "" Opteron, Athlon64 */
-#define X86_FEATURE_K7			( 3*32+ 5) /* "" Athlon */
+/* FREE, was #define X86_FEATURE_K7			( 3*32+ 5) "" Athlon */
 #define X86_FEATURE_P3			( 3*32+ 6) /* "" P3 */
 #define X86_FEATURE_P4			( 3*32+ 7) /* "" P4 */
 #define X86_FEATURE_CONSTANT_TSC	( 3*32+ 8) /* TSC ticks at a constant rate */
@@ -6,7 +6,7 @@
  *
  * Written by Masami Hiramatsu <mhiramat@redhat.com>
  */
-#include <asm/inat_types.h>
+#include <asm/inat_types.h> /* __ignore_sync_check__ */
 
 /*
  * Internal bits. Don't use bitmasks directly, because these bits are
@@ -25,7 +25,7 @@ int insn_fetch_from_user(struct pt_regs *regs,
 			 unsigned char buf[MAX_INSN_SIZE]);
 int insn_fetch_from_user_inatomic(struct pt_regs *regs,
 				  unsigned char buf[MAX_INSN_SIZE]);
-bool insn_decode(struct insn *insn, struct pt_regs *regs,
+bool insn_decode_from_regs(struct insn *insn, struct pt_regs *regs,
 		 unsigned char buf[MAX_INSN_SIZE], int buf_size);
 
 #endif /* _ASM_X86_INSN_EVAL_H */
@@ -9,7 +9,7 @@
 
 #include <asm/byteorder.h>
 /* insn_attr_t is defined in inat.h */
-#include <asm/inat.h>
+#include <asm/inat.h> /* __ignore_sync_check__ */
 
 #if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
@@ -132,13 +132,25 @@ struct insn {
 #define X86_VEX_M_MAX	0x1f			/* VEX3.M Maximum value */
 
 extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64);
-extern void insn_get_prefixes(struct insn *insn);
-extern void insn_get_opcode(struct insn *insn);
-extern void insn_get_modrm(struct insn *insn);
-extern void insn_get_sib(struct insn *insn);
-extern void insn_get_displacement(struct insn *insn);
-extern void insn_get_immediate(struct insn *insn);
-extern void insn_get_length(struct insn *insn);
+extern int insn_get_prefixes(struct insn *insn);
+extern int insn_get_opcode(struct insn *insn);
+extern int insn_get_modrm(struct insn *insn);
+extern int insn_get_sib(struct insn *insn);
+extern int insn_get_displacement(struct insn *insn);
+extern int insn_get_immediate(struct insn *insn);
+extern int insn_get_length(struct insn *insn);
+
+enum insn_mode {
+	INSN_MODE_32,
+	INSN_MODE_64,
+	/* Mode is determined by the current kernel build. */
+	INSN_MODE_KERN,
+	INSN_NUM_MODES,
+};
+
+extern int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m);
+
+#define insn_decode_kernel(_insn, _ptr) insn_decode((_insn), (_ptr), MAX_INSN_SIZE, INSN_MODE_KERN)
 
 /* Attribute will be determined after getting ModRM (for opcode groups) */
 static inline void insn_get_attribute(struct insn *insn)
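The insn_decode_kernel() convenience macro added above covers the most common caller: decoding at a kernel text address with the buffer length fixed at MAX_INSN_SIZE and the mode chosen by the kernel build (INSN_MODE_KERN), replacing the removed kernel_insn_init() pattern below. A hedged sketch of a typical caller (the wrapper function itself is hypothetical):

	static int kernel_insn_len(void *kaddr)
	{
		struct insn insn;
		int ret;

		/* Same as insn_decode(&insn, kaddr, MAX_INSN_SIZE, INSN_MODE_KERN). */
		ret = insn_decode_kernel(&insn, kaddr);
		if (ret < 0)
			return ret;

		return insn.length;
	}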
@@ -149,17 +161,6 @@ static inline void insn_get_attribute(struct insn *insn)
 /* Instruction uses RIP-relative addressing */
 extern int insn_rip_relative(struct insn *insn);
 
-/* Init insn for kernel text */
-static inline void kernel_insn_init(struct insn *insn,
-				    const void *kaddr, int buf_len)
-{
-#ifdef CONFIG_X86_64
-	insn_init(insn, kaddr, buf_len, 1);
-#else /* CONFIG_X86_32 */
-	insn_init(insn, kaddr, buf_len, 0);
-#endif
-}
-
 static inline int insn_is_avx(struct insn *insn)
 {
 	if (!insn->prefixes.got)
@@ -179,13 +180,6 @@ static inline int insn_has_emulate_prefix(struct insn *insn)
 	return !!insn->emulate_prefix_size;
 }
 
-/* Ensure this instruction is decoded completely */
-static inline int insn_complete(struct insn *insn)
-{
-	return insn->opcode.got && insn->modrm.got && insn->sib.got &&
-		insn->displacement.got && insn->immediate.got;
-}
-
 static inline insn_byte_t insn_vex_m_bits(struct insn *insn)
 {
 	if (insn->vex_prefix.nbytes == 2)	/* 2 bytes VEX */
@@ -32,7 +32,9 @@
  *		_EP	- 2 socket server parts
  *		_EX	- 4+ socket server parts
  *
- * The #define line may optionally include a comment including platform names.
+ * The #define line may optionally include a comment including platform or core
+ * names. An exception is made for skylake/kabylake where steppings seem to have gotten
+ * their own names :-(
  */
 
 /* Wildcard match for FAM6 so X86_MATCH_INTEL_FAM6_MODEL(ANY) works */
@@ -69,35 +71,41 @@
 #define INTEL_FAM6_BROADWELL_X		0x4F
 #define INTEL_FAM6_BROADWELL_D		0x56
 
-#define INTEL_FAM6_SKYLAKE_L		0x4E
-#define INTEL_FAM6_SKYLAKE		0x5E
-#define INTEL_FAM6_SKYLAKE_X		0x55
-#define INTEL_FAM6_KABYLAKE_L		0x8E
-#define INTEL_FAM6_KABYLAKE		0x9E
-
-#define INTEL_FAM6_CANNONLAKE_L		0x66
-
-#define INTEL_FAM6_ICELAKE_X		0x6A
-#define INTEL_FAM6_ICELAKE_D		0x6C
-#define INTEL_FAM6_ICELAKE		0x7D
-#define INTEL_FAM6_ICELAKE_L		0x7E
-#define INTEL_FAM6_ICELAKE_NNPI		0x9D
-
-#define INTEL_FAM6_TIGERLAKE_L		0x8C
-#define INTEL_FAM6_TIGERLAKE		0x8D
-
-#define INTEL_FAM6_COMETLAKE		0xA5
-#define INTEL_FAM6_COMETLAKE_L		0xA6
-
-#define INTEL_FAM6_ROCKETLAKE		0xA7
-
-#define INTEL_FAM6_SAPPHIRERAPIDS_X	0x8F
-
-/* Hybrid Core/Atom Processors */
-
-#define INTEL_FAM6_LAKEFIELD		0x8A
-#define INTEL_FAM6_ALDERLAKE		0x97
-#define INTEL_FAM6_ALDERLAKE_L		0x9A
+#define INTEL_FAM6_SKYLAKE_L		0x4E	/* Sky Lake */
+#define INTEL_FAM6_SKYLAKE		0x5E	/* Sky Lake */
+#define INTEL_FAM6_SKYLAKE_X		0x55	/* Sky Lake */
+/*                 CASCADELAKE_X	0x55	   Sky Lake -- s: 7     */
+/*                 COOPERLAKE_X		0x55	   Sky Lake -- s: 11    */
+
+#define INTEL_FAM6_KABYLAKE_L		0x8E	/* Sky Lake */
+/*                 AMBERLAKE_L		0x8E	   Sky Lake -- s: 9     */
+/*                 COFFEELAKE_L		0x8E	   Sky Lake -- s: 10    */
+/*                 WHISKEYLAKE_L	0x8E	   Sky Lake -- s: 11,12 */
+
+#define INTEL_FAM6_KABYLAKE		0x9E	/* Sky Lake */
+/*                 COFFEELAKE		0x9E	   Sky Lake -- s: 10-13 */
+
+#define INTEL_FAM6_COMETLAKE		0xA5	/* Sky Lake */
+#define INTEL_FAM6_COMETLAKE_L		0xA6	/* Sky Lake */
+
+#define INTEL_FAM6_CANNONLAKE_L		0x66	/* Palm Cove */
+
+#define INTEL_FAM6_ICELAKE_X		0x6A	/* Sunny Cove */
+#define INTEL_FAM6_ICELAKE_D		0x6C	/* Sunny Cove */
+#define INTEL_FAM6_ICELAKE		0x7D	/* Sunny Cove */
+#define INTEL_FAM6_ICELAKE_L		0x7E	/* Sunny Cove */
+#define INTEL_FAM6_ICELAKE_NNPI		0x9D	/* Sunny Cove */
+
+#define INTEL_FAM6_LAKEFIELD		0x8A	/* Sunny Cove / Tremont */
+
+#define INTEL_FAM6_ROCKETLAKE		0xA7	/* Cypress Cove */
+
+#define INTEL_FAM6_TIGERLAKE_L		0x8C	/* Willow Cove */
+#define INTEL_FAM6_TIGERLAKE		0x8D	/* Willow Cove */
+#define INTEL_FAM6_SAPPHIRERAPIDS_X	0x8F	/* Willow Cove */
+
+#define INTEL_FAM6_ALDERLAKE		0x97	/* Golden Cove / Gracemont */
+#define INTEL_FAM6_ALDERLAKE_L		0x9A	/* Golden Cove / Gracemont */
 
 /* "Small Core" Processors (Atom) */
 
@@ -6,12 +6,6 @@
 
 #define JUMP_LABEL_NOP_SIZE 5
 
-#ifdef CONFIG_X86_64
-# define STATIC_KEY_INIT_NOP P6_NOP5_ATOMIC
-#else
-# define STATIC_KEY_INIT_NOP GENERIC_NOP5_ATOMIC
-#endif
-
 #include <asm/asm.h>
 #include <asm/nops.h>
 
@@ -23,7 +17,7 @@
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("1:"
-		".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
+		".byte " __stringify(BYTES_NOP5) "\n\t"
 		".pushsection __jump_table, \"aw\" \n\t"
 		_ASM_ALIGN "\n\t"
 		".long 1b - ., %l[l_yes] - . \n\t"
@@ -63,7 +57,7 @@ l_yes:
 	.long		\target - .Lstatic_jump_after_\@
 .Lstatic_jump_after_\@:
 	.else
-	.byte		STATIC_KEY_INIT_NOP
+	.byte		BYTES_NOP5
 	.endif
 	.pushsection __jump_table, "aw"
 	_ASM_ALIGN
@@ -75,7 +69,7 @@ l_yes:
 .macro STATIC_JUMP_IF_FALSE target, key, def
 .Lstatic_jump_\@:
 	.if \def
-	.byte		STATIC_KEY_INIT_NOP
+	.byte		BYTES_NOP5
 	.else
 	/* Equivalent to "jmp.d32 \target" */
 	.byte		0xe9
@@ -65,10 +65,22 @@ struct arch_specific_insn {
 	 * a post_handler).
 	 */
 	unsigned boostable:1;
-	unsigned if_modifier:1;
-	unsigned is_call:1;
-	unsigned is_pushf:1;
-	unsigned is_abs_ip:1;
+	unsigned char size;	/* The size of insn */
+	union {
+		unsigned char opcode;
+		struct {
+			unsigned char type;
+		} jcc;
+		struct {
+			unsigned char type;
+			unsigned char asize;
+		} loop;
+		struct {
+			unsigned char reg;
+		} indirect;
+	};
+	s32 rel32;	/* relative offset must be s32, s16, or s8 */
+	void (*emulate_op)(struct kprobe *p, struct pt_regs *regs);
 	/* Number of bytes of text poked */
 	int tp_len;
 };
@@ -107,7 +119,6 @@ extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_exceptions_notify(struct notifier_block *self,
 				    unsigned long val, void *data);
 extern int kprobe_int3_handler(struct pt_regs *regs);
-extern int kprobe_debug_handler(struct pt_regs *regs);
 
 #else
 
@@ -4,89 +4,58 @@
 
 /*
  * Define nops for use with alternative() and for tracing.
- *
- * *_NOP5_ATOMIC must be a single instruction.
  */
 
-#define NOP_DS_PREFIX 0x3e
+#ifndef CONFIG_64BIT
 
-/* generic versions from gas
-   1: nop
-   the following instructions are NOT nops in 64-bit mode,
-   for 64-bit mode use K8 or P6 nops instead
-   2: movl %esi,%esi
-   3: leal 0x00(%esi),%esi
-   4: leal 0x00(,%esi,1),%esi
-   6: leal 0x00000000(%esi),%esi
-   7: leal 0x00000000(,%esi,1),%esi
-*/
-#define GENERIC_NOP1 0x90
-#define GENERIC_NOP2 0x89,0xf6
-#define GENERIC_NOP3 0x8d,0x76,0x00
-#define GENERIC_NOP4 0x8d,0x74,0x26,0x00
-#define GENERIC_NOP5 GENERIC_NOP1,GENERIC_NOP4
-#define GENERIC_NOP6 0x8d,0xb6,0x00,0x00,0x00,0x00
-#define GENERIC_NOP7 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00
-#define GENERIC_NOP8 GENERIC_NOP1,GENERIC_NOP7
-#define GENERIC_NOP5_ATOMIC NOP_DS_PREFIX,GENERIC_NOP4
+/*
+ * Generic 32bit nops from GAS:
+ *
+ * 1: nop
+ * 2: movl %esi,%esi
+ * 3: leal 0x0(%esi),%esi
+ * 4: leal 0x0(%esi,%eiz,1),%esi
+ * 5: leal %ds:0x0(%esi,%eiz,1),%esi
+ * 6: leal 0x0(%esi),%esi
+ * 7: leal 0x0(%esi,%eiz,1),%esi
+ * 8: leal %ds:0x0(%esi,%eiz,1),%esi
+ *
+ * Except 5 and 8, which are DS prefixed 4 and 7 resp, where GAS would emit 2
+ * nop instructions.
+ */
+#define BYTES_NOP1	0x90
+#define BYTES_NOP2	0x89,0xf6
+#define BYTES_NOP3	0x8d,0x76,0x00
+#define BYTES_NOP4	0x8d,0x74,0x26,0x00
+#define BYTES_NOP5	0x3e,BYTES_NOP4
+#define BYTES_NOP6	0x8d,0xb6,0x00,0x00,0x00,0x00
+#define BYTES_NOP7	0x8d,0xb4,0x26,0x00,0x00,0x00,0x00
+#define BYTES_NOP8	0x3e,BYTES_NOP7
 
-/* Opteron 64bit nops
-   1: nop
-   2: osp nop
-   3: osp osp nop
-   4: osp osp osp nop
-*/
-#define K8_NOP1 GENERIC_NOP1
-#define K8_NOP2	0x66,K8_NOP1
-#define K8_NOP3	0x66,K8_NOP2
-#define K8_NOP4	0x66,K8_NOP3
-#define K8_NOP5	K8_NOP3,K8_NOP2
-#define K8_NOP6	K8_NOP3,K8_NOP3
-#define K8_NOP7	K8_NOP4,K8_NOP3
-#define K8_NOP8	K8_NOP4,K8_NOP4
-#define K8_NOP5_ATOMIC 0x66,K8_NOP4
+#else
 
-/* K7 nops
-   uses eax dependencies (arbitrary choice)
-   1: nop
-   2: movl %eax,%eax
-   3: leal (,%eax,1),%eax
-   4: leal 0x00(,%eax,1),%eax
-   6: leal 0x00000000(%eax),%eax
-   7: leal 0x00000000(,%eax,1),%eax
-*/
-#define K7_NOP1 GENERIC_NOP1
-#define K7_NOP2	0x8b,0xc0
-#define K7_NOP3	0x8d,0x04,0x20
-#define K7_NOP4	0x8d,0x44,0x20,0x00
-#define K7_NOP5	K7_NOP4,K7_NOP1
-#define K7_NOP6	0x8d,0x80,0,0,0,0
-#define K7_NOP7	0x8D,0x04,0x05,0,0,0,0
-#define K7_NOP8	K7_NOP7,K7_NOP1
-#define K7_NOP5_ATOMIC NOP_DS_PREFIX,K7_NOP4
+/*
+ * Generic 64bit nops from GAS:
+ *
+ * 1: nop
+ * 2: osp nop
+ * 3: nopl (%eax)
+ * 4: nopl 0x00(%eax)
+ * 5: nopl 0x00(%eax,%eax,1)
+ * 6: osp nopl 0x00(%eax,%eax,1)
+ * 7: nopl 0x00000000(%eax)
+ * 8: nopl 0x00000000(%eax,%eax,1)
+ */
+#define BYTES_NOP1	0x90
+#define BYTES_NOP2	0x66,BYTES_NOP1
+#define BYTES_NOP3	0x0f,0x1f,0x00
+#define BYTES_NOP4	0x0f,0x1f,0x40,0x00
+#define BYTES_NOP5	0x0f,0x1f,0x44,0x00,0x00
+#define BYTES_NOP6	0x66,BYTES_NOP5
+#define BYTES_NOP7	0x0f,0x1f,0x80,0x00,0x00,0x00,0x00
+#define BYTES_NOP8	0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
 
-/* P6 nops
-   uses eax dependencies (Intel-recommended choice)
-   1: nop
-   2: osp nop
-   3: nopl (%eax)
-   4: nopl 0x00(%eax)
-   5: nopl 0x00(%eax,%eax,1)
-   6: osp nopl 0x00(%eax,%eax,1)
-   7: nopl 0x00000000(%eax)
-   8: nopl 0x00000000(%eax,%eax,1)
-   Note: All the above are assumed to be a single instruction.
-	There is kernel code that depends on this.
-*/
-#define P6_NOP1 GENERIC_NOP1
-#define P6_NOP2	0x66,0x90
-#define P6_NOP3	0x0f,0x1f,0x00
-#define P6_NOP4	0x0f,0x1f,0x40,0
-#define P6_NOP5	0x0f,0x1f,0x44,0x00,0
-#define P6_NOP6	0x66,0x0f,0x1f,0x44,0x00,0
-#define P6_NOP7	0x0f,0x1f,0x80,0,0,0,0
-#define P6_NOP8	0x0f,0x1f,0x84,0x00,0,0,0,0
-#define P6_NOP5_ATOMIC P6_NOP5
+#endif /* CONFIG_64BIT */
 
 #ifdef __ASSEMBLY__
 #define _ASM_MK_NOP(x) .byte x
@@ -94,54 +63,19 @@
 #define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n"
 #endif
 
-#if defined(CONFIG_MK7)
-#define ASM_NOP1 _ASM_MK_NOP(K7_NOP1)
-#define ASM_NOP2 _ASM_MK_NOP(K7_NOP2)
-#define ASM_NOP3 _ASM_MK_NOP(K7_NOP3)
-#define ASM_NOP4 _ASM_MK_NOP(K7_NOP4)
-#define ASM_NOP5 _ASM_MK_NOP(K7_NOP5)
-#define ASM_NOP6 _ASM_MK_NOP(K7_NOP6)
-#define ASM_NOP7 _ASM_MK_NOP(K7_NOP7)
-#define ASM_NOP8 _ASM_MK_NOP(K7_NOP8)
-#define ASM_NOP5_ATOMIC _ASM_MK_NOP(K7_NOP5_ATOMIC)
-#elif defined(CONFIG_X86_P6_NOP)
-#define ASM_NOP1 _ASM_MK_NOP(P6_NOP1)
-#define ASM_NOP2 _ASM_MK_NOP(P6_NOP2)
-#define ASM_NOP3 _ASM_MK_NOP(P6_NOP3)
-#define ASM_NOP4 _ASM_MK_NOP(P6_NOP4)
-#define ASM_NOP5 _ASM_MK_NOP(P6_NOP5)
-#define ASM_NOP6 _ASM_MK_NOP(P6_NOP6)
-#define ASM_NOP7 _ASM_MK_NOP(P6_NOP7)
-#define ASM_NOP8 _ASM_MK_NOP(P6_NOP8)
-#define ASM_NOP5_ATOMIC _ASM_MK_NOP(P6_NOP5_ATOMIC)
-#elif defined(CONFIG_X86_64)
-#define ASM_NOP1 _ASM_MK_NOP(K8_NOP1)
-#define ASM_NOP2 _ASM_MK_NOP(K8_NOP2)
-#define ASM_NOP3 _ASM_MK_NOP(K8_NOP3)
-#define ASM_NOP4 _ASM_MK_NOP(K8_NOP4)
-#define ASM_NOP5 _ASM_MK_NOP(K8_NOP5)
-#define ASM_NOP6 _ASM_MK_NOP(K8_NOP6)
-#define ASM_NOP7 _ASM_MK_NOP(K8_NOP7)
-#define ASM_NOP8 _ASM_MK_NOP(K8_NOP8)
-#define ASM_NOP5_ATOMIC _ASM_MK_NOP(K8_NOP5_ATOMIC)
-#else
-#define ASM_NOP1 _ASM_MK_NOP(GENERIC_NOP1)
-#define ASM_NOP2 _ASM_MK_NOP(GENERIC_NOP2)
-#define ASM_NOP3 _ASM_MK_NOP(GENERIC_NOP3)
-#define ASM_NOP4 _ASM_MK_NOP(GENERIC_NOP4)
-#define ASM_NOP5 _ASM_MK_NOP(GENERIC_NOP5)
-#define ASM_NOP6 _ASM_MK_NOP(GENERIC_NOP6)
-#define ASM_NOP7 _ASM_MK_NOP(GENERIC_NOP7)
-#define ASM_NOP8 _ASM_MK_NOP(GENERIC_NOP8)
-#define ASM_NOP5_ATOMIC _ASM_MK_NOP(GENERIC_NOP5_ATOMIC)
-#endif
+#define ASM_NOP1 _ASM_MK_NOP(BYTES_NOP1)
+#define ASM_NOP2 _ASM_MK_NOP(BYTES_NOP2)
+#define ASM_NOP3 _ASM_MK_NOP(BYTES_NOP3)
+#define ASM_NOP4 _ASM_MK_NOP(BYTES_NOP4)
+#define ASM_NOP5 _ASM_MK_NOP(BYTES_NOP5)
+#define ASM_NOP6 _ASM_MK_NOP(BYTES_NOP6)
+#define ASM_NOP7 _ASM_MK_NOP(BYTES_NOP7)
+#define ASM_NOP8 _ASM_MK_NOP(BYTES_NOP8)
 
 #define ASM_NOP_MAX 8
-#define NOP_ATOMIC5 (ASM_NOP_MAX+1)	/* Entry for the 5-byte atomic NOP */
 
 #ifndef __ASSEMBLY__
-extern const unsigned char * const *ideal_nops;
-extern void arch_init_ideal_nops(void);
+extern const unsigned char * const x86_nops[];
 #endif
 
 #endif /* _ASM_X86_NOPS_H */
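After this change each build has exactly one NOP flavor: BYTES_NOPn for byte lists, ASM_NOPn for inline assembly, and the x86_nops[] table (declared above, defined in alternative.c further down) for runtime patching, where x86_nops[n] points at an n-byte NOP. A small illustrative sketch of the inline-assembly side (the function itself is made up):

	#include <asm/nops.h>

	static __always_inline void emit_5_byte_nop(void)
	{
		/* ASM_NOP5 expands to ".byte " __stringify(BYTES_NOP5) "\n". */
		asm volatile(ASM_NOP5);
	}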
@@ -80,7 +80,7 @@
 .macro JMP_NOSPEC reg:req
 #ifdef CONFIG_RETPOLINE
 	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
-		      __stringify(jmp __x86_retpoline_\reg), X86_FEATURE_RETPOLINE, \
+		      __stringify(jmp __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_AMD
 #else
	jmp	*%\reg
@@ -90,7 +90,7 @@
 .macro CALL_NOSPEC reg:req
 #ifdef CONFIG_RETPOLINE
 	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *%\reg), \
-		      __stringify(call __x86_retpoline_\reg), X86_FEATURE_RETPOLINE, \
+		      __stringify(call __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_AMD
 #else
	call	*%\reg
@@ -128,7 +128,7 @@
 	ALTERNATIVE_2(						\
 	ANNOTATE_RETPOLINE_SAFE					\
 	"call *%[thunk_target]\n",				\
-	"call __x86_retpoline_%V[thunk_target]\n",		\
+	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
 	X86_FEATURE_RETPOLINE,					\
 	"lfence;\n"						\
 	ANNOTATE_RETPOLINE_SAFE					\
@@ -429,6 +429,9 @@ struct fixed_percpu_data {
 	 * GCC hardcodes the stack canary as %gs:40.  Since the
 	 * irq_stack is the object at %gs:0, we reserve the bottom
 	 * 48 bytes of the irq stack for the canary.
+	 *
+	 * Once we are willing to require -mstack-protector-guard-symbol=
+	 * support for x86_64 stackprotector, we can get rid of this.
 	 */
 	char		gs_base[40];
 	unsigned long	stack_canary;
@@ -450,17 +453,7 @@ extern asmlinkage void ignore_sysret(void);
 void current_save_fsgs(void);
 #else	/* X86_64 */
 #ifdef CONFIG_STACKPROTECTOR
-/*
- * Make sure stack canary segment base is cached-aligned:
- *   "For Intel Atom processors, avoid non zero segment base address
- *    that is not aligned to cache line boundary at all cost."
- * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
- */
-struct stack_canary {
-	char __pad[20];		/* canary at %gs:20 */
-	unsigned long canary;
-};
-DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
+DECLARE_PER_CPU(unsigned long, __stack_chk_guard);
 #endif
 DECLARE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
 DECLARE_PER_CPU(struct irq_stack *, softirq_stack_ptr);
@@ -37,7 +37,10 @@ struct pt_regs {
 	unsigned short __esh;
 	unsigned short fs;
 	unsigned short __fsh;
-	/* On interrupt, gs and __gsh store the vector number. */
+	/*
+	 * On interrupt, gs and __gsh store the vector number.  They never
+	 * store gs any more.
+	 */
 	unsigned short gs;
 	unsigned short __gsh;
 	/* On interrupt, this is the error code. */
@@ -95,7 +95,7 @@
  *
  *  26 - ESPFIX small SS
  *  27 - per-cpu			[ offset to per-cpu data area ]
- *  28 - stack_canary-20		[ for stack protector ]		<=== cacheline #8
+ *  28 - unused
  *  29 - unused
 *  30 - unused
 *  31 - TSS for double fault handler
@@ -118,7 +118,6 @@
 
 #define GDT_ENTRY_ESPFIX_SS		26
 #define GDT_ENTRY_PERCPU		27
-#define GDT_ENTRY_STACK_CANARY		28
 
 #define GDT_ENTRY_DOUBLEFAULT_TSS	31
 
@@ -158,12 +157,6 @@
 # define __KERNEL_PERCPU		0
 #endif
 
-#ifdef CONFIG_STACKPROTECTOR
-# define __KERNEL_STACK_CANARY		(GDT_ENTRY_STACK_CANARY*8)
-#else
-# define __KERNEL_STACK_CANARY		0
-#endif
-
 #else /* 64-bit: */
 
 #include <asm/cache.h>
@@ -364,22 +357,15 @@ static inline void __loadsegment_fs(unsigned short value)
 	asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
 
 /*
- * x86-32 user GS accessors:
+ * x86-32 user GS accessors.  This is ugly and could do with some cleaning up.
 */
 #ifdef CONFIG_X86_32
-# ifdef CONFIG_X86_32_LAZY_GS
-#  define get_user_gs(regs)	(u16)({ unsigned long v; savesegment(gs, v); v; })
-#  define set_user_gs(regs, v)	loadsegment(gs, (unsigned long)(v))
-#  define task_user_gs(tsk)	((tsk)->thread.gs)
-#  define lazy_save_gs(v)	savesegment(gs, (v))
-#  define lazy_load_gs(v)	loadsegment(gs, (v))
-# else	/* X86_32_LAZY_GS */
-#  define get_user_gs(regs)	(u16)((regs)->gs)
-#  define set_user_gs(regs, v)	do { (regs)->gs = (v); } while (0)
-#  define task_user_gs(tsk)	(task_pt_regs(tsk)->gs)
-#  define lazy_save_gs(v)	do { } while (0)
-#  define lazy_load_gs(v)	do { } while (0)
-# endif	/* X86_32_LAZY_GS */
+# define get_user_gs(regs)	(u16)({ unsigned long v; savesegment(gs, v); v; })
+# define set_user_gs(regs, v)	loadsegment(gs, (unsigned long)(v))
+# define task_user_gs(tsk)	((tsk)->thread.gs)
+# define lazy_save_gs(v)	savesegment(gs, (v))
+# define lazy_load_gs(v)	loadsegment(gs, (v))
+# define load_gs_index(v)	loadsegment(gs, (v))
 #endif	/* X86_32 */
 
 #endif /* !__ASSEMBLY__ */
@@ -214,7 +214,7 @@ static inline void clflush(volatile void *__p)
 
 static inline void clflushopt(volatile void *__p)
 {
-	alternative_io(".byte " __stringify(NOP_DS_PREFIX) "; clflush %P0",
+	alternative_io(".byte 0x3e; clflush %P0",
		       ".byte 0x66; clflush %P0",
		       X86_FEATURE_CLFLUSHOPT,
		       "+m" (*(volatile char __force *)__p));
@@ -225,7 +225,7 @@ static inline void clwb(volatile void *__p)
 	volatile struct { char x[64]; } *p = __p;
 
 	asm volatile(ALTERNATIVE_2(
-		".byte " __stringify(NOP_DS_PREFIX) "; clflush (%[pax])",
+		".byte 0x3e; clflush (%[pax])",
		".byte 0x66; clflush (%[pax])",	/* clflushopt (%%rax) */
		X86_FEATURE_CLFLUSHOPT,
		".byte 0x66, 0x0f, 0xae, 0x30",	/* clwb (%%rax) */
@@ -5,30 +5,23 @@
 * Stack protector works by putting predefined pattern at the start of
 * the stack frame and verifying that it hasn't been overwritten when
 * returning from the function.  The pattern is called stack canary
- * and unfortunately gcc requires it to be at a fixed offset from %gs.
- * On x86_64, the offset is 40 bytes and on x86_32 20 bytes.  x86_64
- * and x86_32 use segment registers differently and thus handles this
- * requirement differently.
+ * and unfortunately gcc historically required it to be at a fixed offset
+ * from the percpu segment base.  On x86_64, the offset is 40 bytes.
 *
- * On x86_64, %gs is shared by percpu area and stack canary.  All
- * percpu symbols are zero based and %gs points to the base of percpu
- * area.  The first occupant of the percpu area is always
- * fixed_percpu_data which contains stack_canary at offset 40.  Userland
- * %gs is always saved and restored on kernel entry and exit using
- * swapgs, so stack protector doesn't add any complexity there.
+ * The same segment is shared by percpu area and stack canary.  On
+ * x86_64, percpu symbols are zero based and %gs (64-bit) points to the
+ * base of percpu area.  The first occupant of the percpu area is always
+ * fixed_percpu_data which contains stack_canary at the approproate
+ * offset.  On x86_32, the stack canary is just a regular percpu
+ * variable.
 *
- * On x86_32, it's slightly more complicated.  As in x86_64, %gs is
- * used for userland TLS.  Unfortunately, some processors are much
- * slower at loading segment registers with different value when
- * entering and leaving the kernel, so the kernel uses %fs for percpu
- * area and manages %gs lazily so that %gs is switched only when
- * necessary, usually during task switch.
+ * Putting percpu data in %fs on 32-bit is a minor optimization compared to
+ * using %gs.  Since 32-bit userspace normally has %fs == 0, we are likely
+ * to load 0 into %fs on exit to usermode, whereas with percpu data in
+ * %gs, we are likely to load a non-null %gs on return to user mode.
 *
- * As gcc requires the stack canary at %gs:20, %gs can't be managed
- * lazily if stack protector is enabled, so the kernel saves and
- * restores userland %gs on kernel entry and exit.  This behavior is
- * controlled by CONFIG_X86_32_LAZY_GS and accessors are defined in
- * system.h to hide the details.
+ * Once we are willing to require GCC 8.1 or better for 64-bit stackprotector
+ * support, we can remove some of this complexity.
 */
 
 #ifndef _ASM_STACKPROTECTOR_H
@@ -44,14 +37,6 @@
 #include <linux/random.h>
 #include <linux/sched.h>
 
-/*
- * 24 byte read-only segment initializer for stack canary.  Linker
- * can't handle the address bit shifting.  Address will be set in
- * head_32 for boot CPU and setup_per_cpu_areas() for others.
- */
-#define GDT_STACK_CANARY_INIT						\
-	[GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
-
 /*
 * Initialize the stackprotector canary value.
 *
@@ -86,7 +71,7 @@ static __always_inline void boot_init_stack_canary(void)
 #ifdef CONFIG_X86_64
 	this_cpu_write(fixed_percpu_data.stack_canary, canary);
 #else
-	this_cpu_write(stack_canary.canary, canary);
+	this_cpu_write(__stack_chk_guard, canary);
 #endif
 }
 
@@ -95,48 +80,16 @@ static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
 #ifdef CONFIG_X86_64
 	per_cpu(fixed_percpu_data.stack_canary, cpu) = idle->stack_canary;
 #else
-	per_cpu(stack_canary.canary, cpu) = idle->stack_canary;
-#endif
-}
-
-static inline void setup_stack_canary_segment(int cpu)
-{
-#ifdef CONFIG_X86_32
-	unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu);
-	struct desc_struct *gdt_table = get_cpu_gdt_rw(cpu);
-	struct desc_struct desc;
-
-	desc = gdt_table[GDT_ENTRY_STACK_CANARY];
-	set_desc_base(&desc, canary);
-	write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S);
-#endif
-}
-
-static inline void load_stack_canary_segment(void)
-{
-#ifdef CONFIG_X86_32
-	asm("mov %0, %%gs" : : "r" (__KERNEL_STACK_CANARY) : "memory");
+	per_cpu(__stack_chk_guard, cpu) = idle->stack_canary;
 #endif
 }
 
 #else	/* STACKPROTECTOR */
 
-#define GDT_STACK_CANARY_INIT
-
 /* dummy boot_init_stack_canary() is defined in linux/stackprotector.h */
 
-static inline void setup_stack_canary_segment(int cpu)
-{ }
-
 static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
 { }
 
-static inline void load_stack_canary_segment(void)
-{
-#ifdef CONFIG_X86_32
-	asm volatile ("mov %0, %%gs" : : "r" (0));
-#endif
-}
-
 #endif	/* STACKPROTECTOR */
 #endif	/* _ASM_STACKPROTECTOR_H */
@@ -13,12 +13,10 @@
 /* image of the saved processor state */
 struct saved_context {
 	/*
-	 * On x86_32, all segment registers, with the possible exception of
-	 * gs, are saved at kernel entry in pt_regs.
+	 * On x86_32, all segment registers except gs are saved at kernel
+	 * entry in pt_regs.
 	 */
-#ifdef CONFIG_X86_32_LAZY_GS
 	u16 gs;
-#endif
 	unsigned long cr0, cr2, cr3, cr4;
 	u64 misc_enable;
 	bool misc_enable_saved;
@@ -75,186 +75,30 @@ do {								\
 	}							\
 } while (0)
 
-/*
- * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
- * that correspond to that nop. Getting from one nop to the next, we
- * add to the array the offset that is equal to the sum of all sizes of
- * nops preceding the one we are after.
- *
- * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
- * nice symmetry of sizes of the previous nops.
- */
-#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
-static const unsigned char intelnops[] =
+const unsigned char x86nops[] =
 {
-	GENERIC_NOP1,
-	GENERIC_NOP2,
-	GENERIC_NOP3,
-	GENERIC_NOP4,
-	GENERIC_NOP5,
-	GENERIC_NOP6,
-	GENERIC_NOP7,
-	GENERIC_NOP8,
-	GENERIC_NOP5_ATOMIC
+	BYTES_NOP1,
+	BYTES_NOP2,
+	BYTES_NOP3,
+	BYTES_NOP4,
+	BYTES_NOP5,
+	BYTES_NOP6,
+	BYTES_NOP7,
+	BYTES_NOP8,
 };
-static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
+
+const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
 {
 	NULL,
-	intelnops,
-	intelnops + 1,
-	intelnops + 1 + 2,
-	intelnops + 1 + 2 + 3,
-	intelnops + 1 + 2 + 3 + 4,
-	intelnops + 1 + 2 + 3 + 4 + 5,
-	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
-	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
+	x86nops,
+	x86nops + 1,
+	x86nops + 1 + 2,
+	x86nops + 1 + 2 + 3,
+	x86nops + 1 + 2 + 3 + 4,
+	x86nops + 1 + 2 + 3 + 4 + 5,
+	x86nops + 1 + 2 + 3 + 4 + 5 + 6,
+	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
 };
-#endif
-
-#ifdef K8_NOP1
-static const unsigned char k8nops[] =
-{
-	K8_NOP1,
-	K8_NOP2,
-	K8_NOP3,
-	K8_NOP4,
-	K8_NOP5,
-	K8_NOP6,
-	K8_NOP7,
-	K8_NOP8,
-	K8_NOP5_ATOMIC
-};
-static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
-{
-	NULL,
-	k8nops,
-	k8nops + 1,
-	k8nops + 1 + 2,
-	k8nops + 1 + 2 + 3,
-	k8nops + 1 + 2 + 3 + 4,
-	k8nops + 1 + 2 + 3 + 4 + 5,
-	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
-	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
-};
-#endif
-
-#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
-static const unsigned char k7nops[] =
-{
-	K7_NOP1,
-	K7_NOP2,
-	K7_NOP3,
-	K7_NOP4,
-	K7_NOP5,
-	K7_NOP6,
-	K7_NOP7,
-	K7_NOP8,
-	K7_NOP5_ATOMIC
-};
-static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
-{
-	NULL,
-	k7nops,
-	k7nops + 1,
-	k7nops + 1 + 2,
-	k7nops + 1 + 2 + 3,
-	k7nops + 1 + 2 + 3 + 4,
-	k7nops + 1 + 2 + 3 + 4 + 5,
-	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
-	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
-};
-#endif
-
-#ifdef P6_NOP1
-static const unsigned char p6nops[] =
-{
-	P6_NOP1,
-	P6_NOP2,
-	P6_NOP3,
-	P6_NOP4,
-	P6_NOP5,
-	P6_NOP6,
-	P6_NOP7,
-	P6_NOP8,
-	P6_NOP5_ATOMIC
-};
-static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
-{
-	NULL,
-	p6nops,
-	p6nops + 1,
-	p6nops + 1 + 2,
-	p6nops + 1 + 2 + 3,
-	p6nops + 1 + 2 + 3 + 4,
-	p6nops + 1 + 2 + 3 + 4 + 5,
-	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
-	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
-};
-#endif
-
-/* Initialize these to a safe default */
-#ifdef CONFIG_X86_64
-const unsigned char * const *ideal_nops = p6_nops;
-#else
-const unsigned char * const *ideal_nops = intel_nops;
-#endif
-
-void __init arch_init_ideal_nops(void)
-{
-	switch (boot_cpu_data.x86_vendor) {
-	case X86_VENDOR_INTEL:
-		/*
-		 * Due to a decoder implementation quirk, some
-		 * specific Intel CPUs actually perform better with
-		 * the "k8_nops" than with the SDM-recommended NOPs.
-		 */
-		if (boot_cpu_data.x86 == 6 &&
-		    boot_cpu_data.x86_model >= 0x0f &&
-		    boot_cpu_data.x86_model != 0x1c &&
-		    boot_cpu_data.x86_model != 0x26 &&
-		    boot_cpu_data.x86_model != 0x27 &&
-		    boot_cpu_data.x86_model < 0x30) {
-			ideal_nops = k8_nops;
-		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
-			ideal_nops = p6_nops;
-		} else {
-#ifdef CONFIG_X86_64
-			ideal_nops = k8_nops;
-#else
-			ideal_nops = intel_nops;
-#endif
-		}
-		break;
-
-	case X86_VENDOR_HYGON:
-		ideal_nops = p6_nops;
-		return;
-
-	case X86_VENDOR_AMD:
-		if (boot_cpu_data.x86 > 0xf) {
-			ideal_nops = p6_nops;
-			return;
-		}
-
-		fallthrough;
-
-	default:
-#ifdef CONFIG_X86_64
-		ideal_nops = k8_nops;
-#else
-		if (boot_cpu_has(X86_FEATURE_K8))
-			ideal_nops = k8_nops;
-		else if (boot_cpu_has(X86_FEATURE_K7))
-			ideal_nops = k7_nops;
-		else
-			ideal_nops = intel_nops;
-#endif
-	}
-}
 
 /* Use this to add nops to a buffer, then text_poke the whole buffer. */
 static void __init_or_module add_nops(void *insns, unsigned int len)
@@ -263,7 +107,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
 		unsigned int noplen = len;
 		if (noplen > ASM_NOP_MAX)
 			noplen = ASM_NOP_MAX;
-		memcpy(insns, ideal_nops[noplen], noplen);
+		memcpy(insns, x86_nops[noplen], noplen);
 		insns += noplen;
 		len -= noplen;
 	}
@@ -345,19 +189,35 @@ done:
 static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
 {
 	unsigned long flags;
-	int i;
+	struct insn insn;
+	int nop, i = 0;
 
-	for (i = 0; i < a->padlen; i++) {
-		if (instr[i] != 0x90)
+	/*
+	 * Jump over the non-NOP insns, the remaining bytes must be single-byte
+	 * NOPs, optimize them.
+	 */
+	for (;;) {
+		if (insn_decode_kernel(&insn, &instr[i]))
+			return;
+
+		if (insn.length == 1 && insn.opcode.bytes[0] == 0x90)
+			break;
+
+		if ((i += insn.length) >= a->instrlen)
+			return;
+	}
+
+	for (nop = i; i < a->instrlen; i++) {
+		if (WARN_ONCE(instr[i] != 0x90, "Not a NOP at 0x%px\n", &instr[i]))
 			return;
 	}
 
 	local_irq_save(flags);
-	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
+	add_nops(instr + nop, i - nop);
 	local_irq_restore(flags);
 
 	DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
-		   instr, a->instrlen - a->padlen, a->padlen);
+		   instr, nop, a->instrlen);
 }
 
 /*
@@ -403,19 +263,15 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
 		 * - feature not present but ALTINSTR_FLAG_INV is set to mean,
 		 *   patch if feature is *NOT* present.
 		 */
-		if (!boot_cpu_has(feature) == !(a->cpuid & ALTINSTR_FLAG_INV)) {
-			if (a->padlen > 1)
-				optimize_nops(a, instr);
-
-			continue;
-		}
+		if (!boot_cpu_has(feature) == !(a->cpuid & ALTINSTR_FLAG_INV))
+			goto next;
 
-		DPRINTK("feat: %s%d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d",
+		DPRINTK("feat: %s%d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d)",
			(a->cpuid & ALTINSTR_FLAG_INV) ? "!" : "",
			feature >> 5,
			feature & 0x1f,
			instr, instr, a->instrlen,
-			replacement, a->replacementlen, a->padlen);
+			replacement, a->replacementlen);
 
 		DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
 		DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
@@ -439,14 +295,15 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
 		if (a->replacementlen && is_jmp(replacement[0]))
 			recompute_jump(a, instr, replacement, insn_buff);
 
-		if (a->instrlen > a->replacementlen) {
-			add_nops(insn_buff + a->replacementlen,
-				 a->instrlen - a->replacementlen);
-			insn_buff_sz += a->instrlen - a->replacementlen;
-		}
+		for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
+			insn_buff[insn_buff_sz] = 0x90;
+
 		DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);
 
 		text_poke_early(instr, insn_buff, insn_buff_sz);
+
+next:
+		optimize_nops(a, instr);
 	}
 }
 
@@ -1310,15 +1167,15 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
			       const void *opcode, size_t len, const void *emulate)
 {
 	struct insn insn;
+	int ret;
 
 	memcpy((void *)tp->text, opcode, len);
 	if (!emulate)
 		emulate = opcode;
 
-	kernel_insn_init(&insn, emulate, MAX_INSN_SIZE);
-	insn_get_length(&insn);
+	ret = insn_decode_kernel(&insn, emulate);
 
-	BUG_ON(!insn_complete(&insn));
+	BUG_ON(ret < 0);
 	BUG_ON(len != insn.length);
 
 	tp->rel_addr = addr - (void *)_stext;
@@ -1338,13 +1195,13 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
	default: /* assume NOP */
		switch (len) {
		case 2: /* NOP2 -- emulate as JMP8+0 */
-			BUG_ON(memcmp(emulate, ideal_nops[len], len));
+			BUG_ON(memcmp(emulate, x86_nops[len], len));
			tp->opcode = JMP8_INSN_OPCODE;
			tp->rel32 = 0;
			break;

		case 5: /* NOP5 -- emulate as JMP32+0 */
-			BUG_ON(memcmp(emulate, ideal_nops[NOP_ATOMIC5], len));
+			BUG_ON(memcmp(emulate, x86_nops[len], len));
			tp->opcode = JMP32_INSN_OPCODE;
			tp->rel32 = 0;
			break;
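Note: the hunks above collapse the old per-vendor NOP tables into a single x86nops byte array plus an x86_nops[] pointer table indexed by length, so x86_nops[n] yields an n-byte NOP for any n up to ASM_NOP_MAX. A stand-alone C sketch of the same scheme follows; the names and the 0F 1F-style byte encodings here are illustrative assumptions, not the kernel's BYTES_NOPn definitions:

#include <string.h>

#define NOP_MAX 8

/* One flattened array: 1-byte NOP, then 2-byte NOP, ... up to 8 bytes. */
static const unsigned char nops[] = {
	0x90,						/* nop */
	0x66, 0x90,					/* osp nop */
	0x0f, 0x1f, 0x00,				/* nopl (%eax) */
	0x0f, 0x1f, 0x40, 0x00,				/* nopl 0(%eax) */
	0x0f, 0x1f, 0x44, 0x00, 0x00,			/* nopl 0(%eax,%eax,1) */
	0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00,		/* osp nopl 0(%eax,%eax,1) */
	0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00,	/* nopl 0L(%eax) */
	0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,	/* nopl 0L(%eax,%eax,1) */
};

/* Entry n points at the n-byte NOP inside the flattened array. */
static const unsigned char * const nop_tbl[NOP_MAX + 1] = {
	NULL,
	nops,
	nops + 1,
	nops + 1 + 2,
	nops + 1 + 2 + 3,
	nops + 1 + 2 + 3 + 4,
	nops + 1 + 2 + 3 + 4 + 5,
	nops + 1 + 2 + 3 + 4 + 5 + 6,
	nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};

/* Same loop shape as add_nops(): emit the longest NOP that fits, repeat. */
static void pad_with_nops(unsigned char *buf, unsigned int len)
{
	while (len) {
		unsigned int n = len > NOP_MAX ? NOP_MAX : len;

		memcpy(buf, nop_tbl[n], n);
		buf += n;
		len -= n;
	}
}

Padding a 13-byte hole this way emits one 8-byte and one 5-byte NOP instead of thirteen single-byte 0x90s, which is why optimize_nops() bothers rewriting runs of 0x90 at patch time.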
@@ -53,11 +53,6 @@ void foo(void)
	       offsetof(struct cpu_entry_area, tss.x86_tss.sp1) -
	       offsetofend(struct cpu_entry_area, entry_stack_page.stack));
 
-#ifdef CONFIG_STACKPROTECTOR
-	BLANK();
-	OFFSET(stack_canary_offset, stack_canary, canary);
-#endif
-
	BLANK();
	DEFINE(EFI_svam, offsetof(efi_runtime_services_t, set_virtual_address_map));
 }
@@ -628,11 +628,6 @@ static void early_init_amd(struct cpuinfo_x86 *c)
 
 	early_init_amd_mc(c);
 
-#ifdef CONFIG_X86_32
-	if (c->x86 == 6)
-		set_cpu_cap(c, X86_FEATURE_K7);
-#endif
-
 	if (c->x86 >= 0xf)
 		set_cpu_cap(c, X86_FEATURE_K8);
 
@@ -161,7 +161,6 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 
	[GDT_ENTRY_ESPFIX_SS]	= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_PERCPU]	= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
-	GDT_STACK_CANARY_INIT
 #endif
 } };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
@@ -599,7 +598,6 @@ void load_percpu_segment(int cpu)
	__loadsegment_simple(gs, 0);
	wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
 #endif
-	load_stack_canary_segment();
 }
 
 #ifdef CONFIG_X86_32
@@ -1798,7 +1796,8 @@ DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
 EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);
 
 #ifdef CONFIG_STACKPROTECTOR
-DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
+DEFINE_PER_CPU(unsigned long, __stack_chk_guard);
+EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
 #endif
 
 #endif	/* CONFIG_X86_64 */
@@ -215,12 +215,12 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c)
		u32 ecx;
 
		ecx = cpuid_ecx(0x8000001e);
-		nodes_per_socket = ((ecx >> 8) & 7) + 1;
+		__max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
		u64 value;
 
		rdmsrl(MSR_FAM10H_NODE_ID, value);
-		nodes_per_socket = ((value >> 3) & 7) + 1;
+		__max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
	}
 
	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
@@ -486,6 +486,7 @@ static void intel_ppin_init(struct cpuinfo_x86 *c)
	case INTEL_FAM6_BROADWELL_X:
	case INTEL_FAM6_SKYLAKE_X:
	case INTEL_FAM6_ICELAKE_X:
+	case INTEL_FAM6_SAPPHIRERAPIDS_X:
	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
 
@@ -218,15 +218,15 @@ static struct severity {
 static bool is_copy_from_user(struct pt_regs *regs)
 {
	u8 insn_buf[MAX_INSN_SIZE];
-	struct insn insn;
	unsigned long addr;
+	struct insn insn;
+	int ret;
 
	if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip, MAX_INSN_SIZE))
		return false;
 
-	kernel_insn_init(&insn, insn_buf, MAX_INSN_SIZE);
-	insn_get_opcode(&insn);
-	if (!insn.opcode.got)
+	ret = insn_decode_kernel(&insn, insn_buf);
+	if (ret < 0)
		return false;
 
	switch (insn.opcode.value) {
@@ -234,10 +234,6 @@ static bool is_copy_from_user(struct pt_regs *regs)
	case 0x8A: case 0x8B:
	/* MOVZ mem,reg */
	case 0xB60F: case 0xB70F:
-		insn_get_modrm(&insn);
-		insn_get_sib(&insn);
-		if (!insn.modrm.got || !insn.sib.got)
-			return false;
		addr = (unsigned long)insn_get_addr_ref(&insn, regs);
		break;
	/* REP MOVS */
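Note: this file shows the conversion pattern repeated across the series: kernel_insn_init() plus individual insn_get_*() calls and per-field .got checks become one insn_decode_kernel() call with a single error check. A minimal sketch of the resulting call shape (decode_one() is an illustrative stand-in, not kernel code):

/*
 * Before:	kernel_insn_init(&insn, buf, MAX_INSN_SIZE);
 *		insn_get_opcode(&insn);
 *		if (!insn.opcode.got)
 *			return false;
 *
 * After: one call decodes everything (prefixes, opcode, ModRM, SIB,
 * displacements, immediates, length) and reports failure in its
 * return value, so callers no longer probe partial decode state.
 */
static bool decode_one(struct insn *insn, const u8 *buf)
{
	return insn_decode_kernel(insn, buf) >= 0;
}

This is the stated goal of the insn_decode() API in this pull: keep decoder internals away from its users.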
@@ -100,9 +100,7 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack) = {
		.ss		= __KERNEL_DS,
		.ds		= __USER_DS,
		.fs		= __KERNEL_PERCPU,
-#ifndef CONFIG_X86_32_LAZY_GS
-		.gs		= __KERNEL_STACK_CANARY,
-#endif
+		.gs		= 0,
 
		.__cr3		= __pa_nodebug(swapper_pg_dir),
	},
@@ -66,7 +66,7 @@ int ftrace_arch_code_modify_post_process(void)
 
 static const char *ftrace_nop_replace(void)
 {
-	return ideal_nops[NOP_ATOMIC5];
+	return x86_nops[5];
 }
 
 static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
@@ -377,7 +377,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
		ip = trampoline + (jmp_offset - start_offset);
		if (WARN_ON(*(char *)ip != 0x75))
			goto fail;
-		ret = copy_from_kernel_nofault(ip, ideal_nops[2], 2);
+		ret = copy_from_kernel_nofault(ip, x86_nops[2], 2);
		if (ret < 0)
			goto fail;
	}
@@ -318,8 +318,8 @@ SYM_FUNC_START(startup_32_smp)
	movl $(__KERNEL_PERCPU), %eax
	movl %eax,%fs			# set this cpu's percpu
 
-	movl $(__KERNEL_STACK_CANARY),%eax
-	movl %eax,%gs
+	xorl %eax,%eax
+	movl %eax,%gs			# clear possible garbage in %gs
 
	xorl %eax,%eax			# Clear LDT
	lldt %ax
@@ -339,20 +339,6 @@ SYM_FUNC_END(startup_32_smp)
 */
 __INIT
 setup_once:
-#ifdef CONFIG_STACKPROTECTOR
-	/*
-	 * Configure the stack canary. The linker can't handle this by
-	 * relocation. Manually set base address in stack canary
-	 * segment descriptor.
-	 */
-	movl $gdt_page,%eax
-	movl $stack_canary,%ecx
-	movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
-	shrl $16, %ecx
-	movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
-	movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
-#endif
-
	andl $0,setup_once_ref	/* Once is enough, thanks */
	ret
 
@@ -28,10 +28,8 @@ static void bug_at(const void *ip, int line)
 }
 
 static const void *
-__jump_label_set_jump_code(struct jump_entry *entry, enum jump_label_type type, int init)
+__jump_label_set_jump_code(struct jump_entry *entry, enum jump_label_type type)
 {
-	const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
-	const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
	const void *expect, *code;
	const void *addr, *dest;
	int line;
@@ -41,10 +39,8 @@ __jump_label_set_jump_code(struct jump_entry *entry, enum jump_label_type type,
 
	code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest);
 
-	if (init) {
-		expect = default_nop; line = __LINE__;
-	} else if (type == JUMP_LABEL_JMP) {
-		expect = ideal_nop; line = __LINE__;
+	if (type == JUMP_LABEL_JMP) {
+		expect = x86_nops[5]; line = __LINE__;
	} else {
		expect = code; line = __LINE__;
	}
@@ -53,7 +49,7 @@ __jump_label_set_jump_code(struct jump_entry *entry, enum jump_label_type type,
		bug_at(addr, line);
 
	if (type == JUMP_LABEL_NOP)
-		code = ideal_nop;
+		code = x86_nops[5];
 
	return code;
 }
@@ -62,7 +58,7 @@ static inline void __jump_label_transform(struct jump_entry *entry,
					  enum jump_label_type type,
					  int init)
 {
-	const void *opcode = __jump_label_set_jump_code(entry, type, init);
+	const void *opcode = __jump_label_set_jump_code(entry, type);
 
	/*
	 * As long as only a single processor is running and the code is still
@@ -113,7 +109,7 @@ bool arch_jump_label_transform_queue(struct jump_entry *entry,
	}
 
	mutex_lock(&text_mutex);
-	opcode = __jump_label_set_jump_code(entry, type, 0);
+	opcode = __jump_label_set_jump_code(entry, type);
	text_poke_queue((void *)jump_entry_code(entry),
			opcode, JUMP_LABEL_NOP_SIZE, NULL);
	mutex_unlock(&text_mutex);
@@ -136,22 +132,6 @@ static enum {
 __init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
						       enum jump_label_type type)
 {
-	/*
-	 * This function is called at boot up and when modules are
-	 * first loaded. Check if the default nop, the one that is
-	 * inserted at compile time, is the ideal nop. If it is, then
-	 * we do not need to update the nop, and we can leave it as is.
-	 * If it is not, then we need to update the nop to the ideal nop.
-	 */
-	if (jlstate == JL_STATE_START) {
-		const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
-		const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
-
-		if (memcmp(ideal_nop, default_nop, 5) != 0)
-			jlstate = JL_STATE_UPDATE;
-		else
-			jlstate = JL_STATE_NO_UPDATE;
-	}
	if (jlstate == JL_STATE_UPDATE)
		jump_label_transform(entry, type, 1);
 }
@@ -139,6 +139,8 @@ NOKPROBE_SYMBOL(synthesize_relcall);
 int can_boost(struct insn *insn, void *addr)
 {
	kprobe_opcode_t opcode;
+	insn_byte_t prefix;
+	int i;
 
	if (search_exception_tables((unsigned long)addr))
		return 0;	/* Page fault may occur on this address. */
@@ -151,35 +153,39 @@ int can_boost(struct insn *insn, void *addr)
	if (insn->opcode.nbytes != 1)
		return 0;
 
-	/* Can't boost Address-size override prefix */
-	if (unlikely(inat_is_address_size_prefix(insn->attr)))
-		return 0;
+	for_each_insn_prefix(insn, i, prefix) {
+		insn_attr_t attr;
+
+		attr = inat_get_opcode_attribute(prefix);
+		/* Can't boost Address-size override prefix and CS override prefix */
+		if (prefix == 0x2e || inat_is_address_size_prefix(attr))
+			return 0;
+	}
 
	opcode = insn->opcode.bytes[0];
 
-	switch (opcode & 0xf0) {
-	case 0x60:
-		/* can't boost "bound" */
-		return (opcode != 0x62);
-	case 0x70:
-		return 0; /* can't boost conditional jump */
-	case 0x90:
-		return opcode != 0x9a;	/* can't boost call far */
-	case 0xc0:
-		/* can't boost software-interruptions */
-		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
-	case 0xd0:
-		/* can boost AA* and XLAT */
-		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
-	case 0xe0:
-		/* can boost in/out and absolute jmps */
-		return ((opcode & 0x04) || opcode == 0xea);
-	case 0xf0:
-		/* clear and set flags are boostable */
-		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
+	switch (opcode) {
+	case 0x62:		/* bound */
+	case 0x70 ... 0x7f:	/* Conditional jumps */
+	case 0x9a:		/* Call far */
+	case 0xc0 ... 0xc1:	/* Grp2 */
+	case 0xcc ... 0xce:	/* software exceptions */
+	case 0xd0 ... 0xd3:	/* Grp2 */
+	case 0xd6:		/* (UD) */
+	case 0xd8 ... 0xdf:	/* ESC */
+	case 0xe0 ... 0xe3:	/* LOOP*, JCXZ */
+	case 0xe8 ... 0xe9:	/* near Call, JMP */
+	case 0xeb:		/* Short JMP */
+	case 0xf0 ... 0xf4:	/* LOCK/REP, HLT */
+	case 0xf6 ... 0xf7:	/* Grp3 */
+	case 0xfe:		/* Grp4 */
+		/* ... are not boostable */
+		return 0;
+	case 0xff:		/* Grp5 */
+		/* Only indirect jmp is boostable */
+		return X86_MODRM_REG(insn->modrm.bytes[0]) == 4;
	default:
-		/* CS override prefix and call are not boostable */
-		return (opcode != 0x2e && opcode != 0x9a);
+		return 1;
	}
 }
 
@@ -229,7 +235,7 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
		return 0UL;
 
	if (faddr)
-		memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
+		memcpy(buf, x86_nops[5], 5);
	else
		buf[0] = kp->opcode;
	return (unsigned long)buf;
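Note: the reworked can_boost() above replaces the old (opcode & 0xf0) bucket tests with an explicit switch over the full opcode byte, using GCC/Clang case ranges. A stand-alone model of that filter (modrm_reg stands in for X86_MODRM_REG(insn->modrm.bytes[0]); the table mirrors the hunk above):

#include <stdbool.h>

/* Can this single-byte opcode be "boosted" (executed out of line with a
 * relative jump appended) rather than single-stepped? */
static bool opcode_is_boostable(unsigned char opcode, unsigned char modrm_reg)
{
	switch (opcode) {
	case 0x62:		/* bound */
	case 0x70 ... 0x7f:	/* conditional jumps */
	case 0x9a:		/* call far */
	case 0xc0 ... 0xc1:	/* Grp2 */
	case 0xcc ... 0xce:	/* software exceptions */
	case 0xd0 ... 0xd3:	/* Grp2 */
	case 0xd6:		/* (UD) */
	case 0xd8 ... 0xdf:	/* ESC */
	case 0xe0 ... 0xe3:	/* LOOP*, JCXZ */
	case 0xe8 ... 0xe9:	/* near call, jmp */
	case 0xeb:		/* short jmp */
	case 0xf0 ... 0xf4:	/* LOCK/REP, HLT */
	case 0xf6 ... 0xf7:	/* Grp3 */
	case 0xfe:		/* Grp4 */
		return false;	/* not boostable */
	case 0xff:		/* Grp5: only indirect jmp (reg field == 4) */
		return modrm_reg == 4;
	default:
		return true;
	}
}

The default-true branch is what makes ordinary ALU and MOV opcodes boostable; only control transfers, traps, and group opcodes whose behaviour depends on ModRM are excluded.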
@@ -265,6 +271,8 @@ static int can_probe(unsigned long paddr)
	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr) {
+		int ret;
+
		/*
		 * Check if the instruction has been modified by another
		 * kprobe, in which case we replace the breakpoint by the
@@ -276,8 +284,10 @@ static int can_probe(unsigned long paddr)
		__addr = recover_probed_instruction(buf, addr);
		if (!__addr)
			return 0;
-		kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE);
-		insn_get_length(&insn);
+
+		ret = insn_decode_kernel(&insn, (void *)__addr);
+		if (ret < 0)
+			return 0;
 
		/*
		 * Another debugging subsystem might insert this breakpoint.
@@ -301,8 +311,8 @@ static int can_probe(unsigned long paddr)
 int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
 {
	kprobe_opcode_t buf[MAX_INSN_SIZE];
-	unsigned long recovered_insn =
-		recover_probed_instruction(buf, (unsigned long)src);
+	unsigned long recovered_insn = recover_probed_instruction(buf, (unsigned long)src);
+	int ret;
 
	if (!recovered_insn || !insn)
		return 0;
@@ -312,8 +322,9 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
			MAX_INSN_SIZE))
		return 0;
 
-	kernel_insn_init(insn, dest, MAX_INSN_SIZE);
-	insn_get_length(insn);
+	ret = insn_decode_kernel(insn, dest);
+	if (ret < 0)
+		return 0;
 
	/* We can not probe force emulate prefixed instruction */
	if (insn_has_emulate_prefix(insn))
@@ -357,13 +368,14 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
	return insn->length;
 }
 
-/* Prepare reljump right after instruction to boost */
-static int prepare_boost(kprobe_opcode_t *buf, struct kprobe *p,
-			  struct insn *insn)
+/* Prepare reljump or int3 right after instruction */
+static int prepare_singlestep(kprobe_opcode_t *buf, struct kprobe *p,
			      struct insn *insn)
 {
	int len = insn->length;
 
-	if (can_boost(insn, p->addr) &&
+	if (!IS_ENABLED(CONFIG_PREEMPTION) &&
+	    !p->post_handler && can_boost(insn, p->addr) &&
	    MAX_INSN_SIZE - len >= JMP32_INSN_SIZE) {
		/*
		 * These instructions can be executed directly if it
@@ -374,7 +386,12 @@ static int prepare_singlestep(kprobe_opcode_t *buf, struct kprobe *p,
		len += JMP32_INSN_SIZE;
		p->ainsn.boostable = 1;
	} else {
-		p->ainsn.boostable = 0;
+		/* Otherwise, put an int3 for trapping singlestep */
+		if (MAX_INSN_SIZE - len < INT3_INSN_SIZE)
+			return -ENOSPC;
+
+		buf[len] = INT3_INSN_OPCODE;
+		len += INT3_INSN_SIZE;
	}
 
	return len;
@@ -411,86 +428,290 @@ void free_insn_page(void *page)
	module_memfree(page);
 }
 
-static void set_resume_flags(struct kprobe *p, struct insn *insn)
+/* Kprobe x86 instruction emulation - only regs->ip or IF flag modifiers */
+
+static void kprobe_emulate_ifmodifiers(struct kprobe *p, struct pt_regs *regs)
+{
+	switch (p->ainsn.opcode) {
+	case 0xfa:	/* cli */
+		regs->flags &= ~(X86_EFLAGS_IF);
+		break;
+	case 0xfb:	/* sti */
+		regs->flags |= X86_EFLAGS_IF;
+		break;
+	case 0x9c:	/* pushf */
+		int3_emulate_push(regs, regs->flags);
+		break;
+	case 0x9d:	/* popf */
+		regs->flags = int3_emulate_pop(regs);
+		break;
+	}
+	regs->ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
+}
+NOKPROBE_SYMBOL(kprobe_emulate_ifmodifiers);
+
+static void kprobe_emulate_ret(struct kprobe *p, struct pt_regs *regs)
+{
+	int3_emulate_ret(regs);
+}
+NOKPROBE_SYMBOL(kprobe_emulate_ret);
+
+static void kprobe_emulate_call(struct kprobe *p, struct pt_regs *regs)
+{
+	unsigned long func = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
+
+	func += p->ainsn.rel32;
+	int3_emulate_call(regs, func);
+}
+NOKPROBE_SYMBOL(kprobe_emulate_call);
+
+static nokprobe_inline
+void __kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs, bool cond)
+{
+	unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
+
+	if (cond)
+		ip += p->ainsn.rel32;
+	int3_emulate_jmp(regs, ip);
+}
+
+static void kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs)
+{
+	__kprobe_emulate_jmp(p, regs, true);
+}
+NOKPROBE_SYMBOL(kprobe_emulate_jmp);
+
+static const unsigned long jcc_mask[6] = {
+	[0] = X86_EFLAGS_OF,
+	[1] = X86_EFLAGS_CF,
+	[2] = X86_EFLAGS_ZF,
+	[3] = X86_EFLAGS_CF | X86_EFLAGS_ZF,
+	[4] = X86_EFLAGS_SF,
+	[5] = X86_EFLAGS_PF,
+};
+
+static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs)
+{
+	bool invert = p->ainsn.jcc.type & 1;
+	bool match;
+
+	if (p->ainsn.jcc.type < 0xc) {
+		match = regs->flags & jcc_mask[p->ainsn.jcc.type >> 1];
+	} else {
+		match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
+			((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
+		if (p->ainsn.jcc.type >= 0xe)
+			match = match && (regs->flags & X86_EFLAGS_ZF);
+	}
+	__kprobe_emulate_jmp(p, regs, (match && !invert) || (!match && invert));
+}
+NOKPROBE_SYMBOL(kprobe_emulate_jcc);
+
+static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs)
+{
+	bool match;
+
+	if (p->ainsn.loop.type != 3) {	/* LOOP* */
+		if (p->ainsn.loop.asize == 32)
+			match = ((*(u32 *)&regs->cx)--) != 0;
+#ifdef CONFIG_X86_64
+		else if (p->ainsn.loop.asize == 64)
+			match = ((*(u64 *)&regs->cx)--) != 0;
+#endif
+		else
+			match = ((*(u16 *)&regs->cx)--) != 0;
+	} else {			/* JCXZ */
+		if (p->ainsn.loop.asize == 32)
+			match = *(u32 *)(&regs->cx) == 0;
+#ifdef CONFIG_X86_64
+		else if (p->ainsn.loop.asize == 64)
+			match = *(u64 *)(&regs->cx) == 0;
+#endif
+		else
+			match = *(u16 *)(&regs->cx) == 0;
+	}
+
+	if (p->ainsn.loop.type == 0)	/* LOOPNE */
+		match = match && !(regs->flags & X86_EFLAGS_ZF);
+	else if (p->ainsn.loop.type == 1)	/* LOOPE */
+		match = match && (regs->flags & X86_EFLAGS_ZF);
+
+	__kprobe_emulate_jmp(p, regs, match);
+}
+NOKPROBE_SYMBOL(kprobe_emulate_loop);
+
+static const int addrmode_regoffs[] = {
+	offsetof(struct pt_regs, ax),
+	offsetof(struct pt_regs, cx),
+	offsetof(struct pt_regs, dx),
+	offsetof(struct pt_regs, bx),
+	offsetof(struct pt_regs, sp),
+	offsetof(struct pt_regs, bp),
+	offsetof(struct pt_regs, si),
+	offsetof(struct pt_regs, di),
+#ifdef CONFIG_X86_64
+	offsetof(struct pt_regs, r8),
+	offsetof(struct pt_regs, r9),
+	offsetof(struct pt_regs, r10),
+	offsetof(struct pt_regs, r11),
+	offsetof(struct pt_regs, r12),
+	offsetof(struct pt_regs, r13),
+	offsetof(struct pt_regs, r14),
+	offsetof(struct pt_regs, r15),
+#endif
+};
+
+static void kprobe_emulate_call_indirect(struct kprobe *p, struct pt_regs *regs)
+{
+	unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];
+
+	int3_emulate_call(regs, regs_get_register(regs, offs));
+}
+NOKPROBE_SYMBOL(kprobe_emulate_call_indirect);
+
+static void kprobe_emulate_jmp_indirect(struct kprobe *p, struct pt_regs *regs)
+{
+	unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];
+
+	int3_emulate_jmp(regs, regs_get_register(regs, offs));
+}
+NOKPROBE_SYMBOL(kprobe_emulate_jmp_indirect);
+
+static int prepare_emulation(struct kprobe *p, struct insn *insn)
 {
	insn_byte_t opcode = insn->opcode.bytes[0];
 
	switch (opcode) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
+	case 0x9c:		/* pushfl */
	case 0x9d:		/* popf/popfd */
-		/* Check whether the instruction modifies Interrupt Flag or not */
-		p->ainsn.if_modifier = 1;
-		break;
-	case 0x9c:	/* pushfl */
-		p->ainsn.is_pushf = 1;
-		break;
-	case 0xcf:	/* iret */
-		p->ainsn.if_modifier = 1;
-		fallthrough;
+		/*
+		 * IF modifiers must be emulated since it will enable interrupt while
+		 * int3 single stepping.
+		 */
+		p->ainsn.emulate_op = kprobe_emulate_ifmodifiers;
+		p->ainsn.opcode = opcode;
+		break;
	case 0xc2:	/* ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
-	case 0xea:	/* jmp absolute -- ip is correct */
-		/* ip is already adjusted, no more changes required */
-		p->ainsn.is_abs_ip = 1;
-		/* Without resume jump, this is boostable */
-		p->ainsn.boostable = 1;
+		p->ainsn.emulate_op = kprobe_emulate_ret;
		break;
-	case 0xe8:	/* call relative - Fix return addr */
-		p->ainsn.is_call = 1;
+	case 0x9a:	/* far call absolute -- segment is not supported */
+	case 0xea:	/* far jmp absolute -- segment is not supported */
+	case 0xcc:	/* int3 */
+	case 0xcf:	/* iret -- in-kernel IRET is not supported */
+		return -EOPNOTSUPP;
		break;
-#ifdef CONFIG_X86_32
-	case 0x9a:	/* call absolute -- same as call absolute, indirect */
-		p->ainsn.is_call = 1;
-		p->ainsn.is_abs_ip = 1;
+	case 0xe8:	/* near call relative */
+		p->ainsn.emulate_op = kprobe_emulate_call;
+		if (insn->immediate.nbytes == 2)
+			p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
+		else
+			p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
		break;
-#endif
-	case 0xff:
+	case 0xeb:	/* short jump relative */
+	case 0xe9:	/* near jump relative */
+		p->ainsn.emulate_op = kprobe_emulate_jmp;
+		if (insn->immediate.nbytes == 1)
+			p->ainsn.rel32 = *(s8 *)&insn->immediate.value;
+		else if (insn->immediate.nbytes == 2)
+			p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
+		else
+			p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
+		break;
+	case 0x70 ... 0x7f:
+		/* 1 byte conditional jump */
+		p->ainsn.emulate_op = kprobe_emulate_jcc;
+		p->ainsn.jcc.type = opcode & 0xf;
+		p->ainsn.rel32 = *(char *)insn->immediate.bytes;
+		break;
+	case 0x0f:
		opcode = insn->opcode.bytes[1];
-		if ((opcode & 0x30) == 0x10) {
-			/*
-			 * call absolute, indirect
-			 * Fix return addr; ip is correct.
-			 * But this is not boostable
-			 */
-			p->ainsn.is_call = 1;
-			p->ainsn.is_abs_ip = 1;
-			break;
-		} else if (((opcode & 0x31) == 0x20) ||
-			   ((opcode & 0x31) == 0x21)) {
-			/*
-			 * jmp near and far, absolute indirect
-			 * ip is correct.
-			 */
-			p->ainsn.is_abs_ip = 1;
-			/* Without resume jump, this is boostable */
-			p->ainsn.boostable = 1;
+		if ((opcode & 0xf0) == 0x80) {
+			/* 2 bytes Conditional Jump */
+			p->ainsn.emulate_op = kprobe_emulate_jcc;
+			p->ainsn.jcc.type = opcode & 0xf;
+			if (insn->immediate.nbytes == 2)
+				p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
+			else
+				p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
+		} else if (opcode == 0x01 &&
+			   X86_MODRM_REG(insn->modrm.bytes[0]) == 0 &&
+			   X86_MODRM_MOD(insn->modrm.bytes[0]) == 3) {
+			/* VM extensions - not supported */
+			return -EOPNOTSUPP;
		}
		break;
+	case 0xe0:	/* Loop NZ */
+	case 0xe1:	/* Loop */
+	case 0xe2:	/* Loop */
+	case 0xe3:	/* J*CXZ */
+		p->ainsn.emulate_op = kprobe_emulate_loop;
+		p->ainsn.loop.type = opcode & 0x3;
+		p->ainsn.loop.asize = insn->addr_bytes * 8;
+		p->ainsn.rel32 = *(s8 *)&insn->immediate.value;
+		break;
+	case 0xff:
+		/*
+		 * Since the 0xff is an extended group opcode, the instruction
+		 * is determined by the MOD/RM byte.
+		 */
+		opcode = insn->modrm.bytes[0];
+		if ((opcode & 0x30) == 0x10) {
+			if ((opcode & 0x8) == 0x8)
+				return -EOPNOTSUPP;	/* far call */
+			/* call absolute, indirect */
+			p->ainsn.emulate_op = kprobe_emulate_call_indirect;
+		} else if ((opcode & 0x30) == 0x20) {
+			if ((opcode & 0x8) == 0x8)
+				return -EOPNOTSUPP;	/* far jmp */
+			/* jmp near absolute indirect */
+			p->ainsn.emulate_op = kprobe_emulate_jmp_indirect;
+		} else
+			break;
+
+		if (insn->addr_bytes != sizeof(unsigned long))
+			return -EOPNOTSUPP;	/* Don't support differnt size */
+		if (X86_MODRM_MOD(opcode) != 3)
+			return -EOPNOTSUPP;	/* TODO: support memory addressing */
+
+		p->ainsn.indirect.reg = X86_MODRM_RM(opcode);
+#ifdef CONFIG_X86_64
+		if (X86_REX_B(insn->rex_prefix.value))
+			p->ainsn.indirect.reg += 8;
+#endif
+		break;
+	default:
+		break;
	}
+	p->ainsn.size = insn->length;
+
+	return 0;
 }
 
 static int arch_copy_kprobe(struct kprobe *p)
 {
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];
-	int len;
+	int ret, len;
 
	/* Copy an instruction with recovering if other optprobe modifies it.*/
	len = __copy_instruction(buf, p->addr, p->ainsn.insn, &insn);
	if (!len)
		return -EINVAL;
 
-	/*
-	 * __copy_instruction can modify the displacement of the instruction,
-	 * but it doesn't affect boostable check.
-	 */
-	len = prepare_boost(buf, p, &insn);
+	/* Analyze the opcode and setup emulate functions */
+	ret = prepare_emulation(p, &insn);
+	if (ret < 0)
+		return ret;
 
-	/* Analyze the opcode and set resume flags */
-	set_resume_flags(p, &insn);
+	/* Add int3 for single-step or booster jmp */
+	len = prepare_singlestep(buf, p, &insn);
+	if (len < 0)
+		return len;
 
	/* Also, displacement change doesn't affect the first byte */
	p->opcode = buf[0];
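Note: kprobe_emulate_jcc() above drives all the Jcc forms off one six-entry flag-mask table: the low nibble of the opcode is the condition code, its low bit inverts the sense, and each adjacent pair of codes shares one EFLAGS mask. A stand-alone sketch for the simple conditions cc 0x0..0xb follows (flag names and bit positions follow the x86 EFLAGS layout); the signed conditions cc >= 0xc additionally combine SF, OF and ZF as in the hunk above:

#include <stdbool.h>

#define F_CF (1UL << 0)
#define F_PF (1UL << 2)
#define F_ZF (1UL << 6)
#define F_SF (1UL << 7)
#define F_OF (1UL << 11)

static const unsigned long jcc_mask[6] = {
	F_OF,		/* 0x0/0x1: JO  / JNO */
	F_CF,		/* 0x2/0x3: JB  / JAE */
	F_ZF,		/* 0x4/0x5: JE  / JNE */
	F_CF | F_ZF,	/* 0x6/0x7: JBE / JA  */
	F_SF,		/* 0x8/0x9: JS  / JNS */
	F_PF,		/* 0xa/0xb: JP  / JNP */
};

/* Would the conditional jump with condition code 'cc' be taken? */
static bool jcc_taken(unsigned int cc, unsigned long flags)
{
	bool invert = cc & 1;			/* odd codes negate the test */
	bool match = flags & jcc_mask[cc >> 1];

	return match != invert;
}

For example, jcc_taken(0x5, 0) is true: condition 0x5 is JNE, the ZF mask does not match a cleared flags word, and the invert bit turns that mismatch into "taken".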
@@ -583,29 +804,7 @@ set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 {
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
-		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
-	if (p->ainsn.if_modifier)
-		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
-}
-
-static nokprobe_inline void clear_btf(void)
-{
-	if (test_thread_flag(TIF_BLOCKSTEP)) {
-		unsigned long debugctl = get_debugctlmsr();
-
-		debugctl &= ~DEBUGCTLMSR_BTF;
-		update_debugctlmsr(debugctl);
-	}
-}
-
-static nokprobe_inline void restore_btf(void)
-{
-	if (test_thread_flag(TIF_BLOCKSTEP)) {
-		unsigned long debugctl = get_debugctlmsr();
-
-		debugctl |= DEBUGCTLMSR_BTF;
-		update_debugctlmsr(debugctl);
-	}
+		= (regs->flags & X86_EFLAGS_IF);
 }
 
 void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
@@ -620,6 +819,22 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
 }
 NOKPROBE_SYMBOL(arch_prepare_kretprobe);
 
+static void kprobe_post_process(struct kprobe *cur, struct pt_regs *regs,
+				struct kprobe_ctlblk *kcb)
+{
+	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+		kcb->kprobe_status = KPROBE_HIT_SSDONE;
+		cur->post_handler(cur, regs, 0);
+	}
+
+	/* Restore back the original saved kprobes variables and continue. */
+	if (kcb->kprobe_status == KPROBE_REENTER)
+		restore_previous_kprobe(kcb);
+	else
+		reset_current_kprobe();
+}
+NOKPROBE_SYMBOL(kprobe_post_process);
+
 static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb, int reenter)
 {
@@ -627,7 +842,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
		return;
 
 #if !defined(CONFIG_PREEMPTION)
-	if (p->ainsn.boostable && !p->post_handler) {
+	if (p->ainsn.boostable) {
		/* Boost up -- we can execute copied instructions directly */
		if (!reenter)
			reset_current_kprobe();
@@ -646,18 +861,50 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
		kcb->kprobe_status = KPROBE_REENTER;
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;
-	/* Prepare real single stepping */
-	clear_btf();
-	regs->flags |= X86_EFLAGS_TF;
+
+	if (p->ainsn.emulate_op) {
+		p->ainsn.emulate_op(p, regs);
+		kprobe_post_process(p, regs, kcb);
+		return;
+	}
+
+	/* Disable interrupt, and set ip register on trampoline */
	regs->flags &= ~X86_EFLAGS_IF;
-	/* single step inline if the instruction is an int3 */
-	if (p->opcode == INT3_INSN_OPCODE)
-		regs->ip = (unsigned long)p->addr;
-	else
-		regs->ip = (unsigned long)p->ainsn.insn;
+	regs->ip = (unsigned long)p->ainsn.insn;
 }
 NOKPROBE_SYMBOL(setup_singlestep);
 
+/*
+ * Called after single-stepping.  p->addr is the address of the
+ * instruction whose first byte has been replaced by the "int3"
+ * instruction.  To avoid the SMP problems that can occur when we
+ * temporarily put back the original opcode to single-step, we
+ * single-stepped a copy of the instruction.  The address of this
+ * copy is p->ainsn.insn. We also doesn't use trap, but "int3" again
+ * right after the copied instruction.
+ * Different from the trap single-step, "int3" single-step can not
+ * handle the instruction which changes the ip register, e.g. jmp,
+ * call, conditional jmp, and the instructions which changes the IF
+ * flags because interrupt must be disabled around the single-stepping.
+ * Such instructions are software emulated, but others are single-stepped
+ * using "int3".
+ *
+ * When the 2nd "int3" handled, the regs->ip and regs->flags needs to
+ * be adjusted, so that we can resume execution on correct code.
+ */
+static void resume_singlestep(struct kprobe *p, struct pt_regs *regs,
+			      struct kprobe_ctlblk *kcb)
+{
+	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
+	unsigned long orig_ip = (unsigned long)p->addr;
+
+	/* Restore saved interrupt flag and ip register */
+	regs->flags |= kcb->kprobe_saved_flags;
+	/* Note that regs->ip is executed int3 so must be a step back */
+	regs->ip += (orig_ip - copy_ip) - INT3_INSN_SIZE;
+}
+NOKPROBE_SYMBOL(resume_singlestep);
+
 /*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler. We save the original kprobes variables and just single
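Note: the ip fix-up in resume_singlestep() above can be checked with a little arithmetic. The second int3 traps with regs->ip just past the copied instruction plus the trailing int3, and adding (orig_ip - copy_ip) - INT3_INSN_SIZE rebases that to just past the original instruction. A stand-alone check with made-up addresses:

#include <assert.h>

#define INT3_INSN_SIZE 1

int main(void)
{
	unsigned long orig_ip = 0xffffffff81000000UL; /* probed insn (now an int3) */
	unsigned long copy_ip = 0xffffffffa0000000UL; /* out-of-line copy slot */
	unsigned long insn_len = 3;                   /* decoded insn length */

	/* ip reported by the second #BP: past the copy and its trailing int3 */
	unsigned long ip = copy_ip + insn_len + INT3_INSN_SIZE;

	ip += (orig_ip - copy_ip) - INT3_INSN_SIZE;   /* the hunk's fix-up */
	assert(ip == orig_ip + insn_len);             /* resume after original */
	return 0;
}

This is why the int3 handler below only treats a trap inside [p->ainsn.insn, p->ainsn.insn + MAX_INSN_SIZE) as the single-step completion.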
@@ -693,6 +940,12 @@ static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
 }
 NOKPROBE_SYMBOL(reenter_kprobe);
 
+static nokprobe_inline int kprobe_is_ss(struct kprobe_ctlblk *kcb)
+{
+	return (kcb->kprobe_status == KPROBE_HIT_SS ||
+		kcb->kprobe_status == KPROBE_REENTER);
+}
+
 /*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
@@ -737,7 +990,18 @@ int kprobe_int3_handler(struct pt_regs *regs)
				reset_current_kprobe();
			return 1;
		}
-	} else if (*addr != INT3_INSN_OPCODE) {
+	} else if (kprobe_is_ss(kcb)) {
+		p = kprobe_running();
+		if ((unsigned long)p->ainsn.insn < regs->ip &&
+		    (unsigned long)p->ainsn.insn + MAX_INSN_SIZE > regs->ip) {
+			/* Most provably this is the second int3 for singlestep */
+			resume_singlestep(p, regs, kcb);
+			kprobe_post_process(p, regs, kcb);
+			return 1;
+		}
+	}
+
+	if (*addr != INT3_INSN_OPCODE) {
		/*
		 * The breakpoint instruction was removed right
		 * after we hit it. Another cpu has removed
@@ -810,91 +1074,6 @@ __used __visible void *trampoline_handler(struct pt_regs *regs)
 }
 NOKPROBE_SYMBOL(trampoline_handler);
 
-/*
- * Called after single-stepping.  p->addr is the address of the
- * instruction whose first byte has been replaced by the "int 3"
- * instruction.  To avoid the SMP problems that can occur when we
- * temporarily put back the original opcode to single-step, we
- * single-stepped a copy of the instruction.  The address of this
- * copy is p->ainsn.insn.
- *
- * This function prepares to return from the post-single-step
- * interrupt.  We have to fix up the stack as follows:
- *
- * 0) Except in the case of absolute or indirect jump or call instructions,
- * the new ip is relative to the copied instruction.  We need to make
- * it relative to the original instruction.
- *
- * 1) If the single-stepped instruction was pushfl, then the TF and IF
- * flags are set in the just-pushed flags, and may need to be cleared.
- *
- * 2) If the single-stepped instruction was a call, the return address
- * that is atop the stack is the address following the copied instruction.
- * We need to make it the address following the original instruction.
- */
-static void resume_execution(struct kprobe *p, struct pt_regs *regs,
-			     struct kprobe_ctlblk *kcb)
-{
-	unsigned long *tos = stack_addr(regs);
-	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
-	unsigned long orig_ip = (unsigned long)p->addr;
-
-	regs->flags &= ~X86_EFLAGS_TF;
-
-	/* Fixup the contents of top of stack */
-	if (p->ainsn.is_pushf) {
-		*tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
-		*tos |= kcb->kprobe_old_flags;
-	} else if (p->ainsn.is_call) {
-		*tos = orig_ip + (*tos - copy_ip);
-	}
-
-	if (!p->ainsn.is_abs_ip)
-		regs->ip += orig_ip - copy_ip;
-
-	restore_btf();
-}
-NOKPROBE_SYMBOL(resume_execution);
-
-/*
- * Interrupts are disabled on entry as trap1 is an interrupt gate and they
- * remain disabled throughout this function.
- */
-int kprobe_debug_handler(struct pt_regs *regs)
-{
-	struct kprobe *cur = kprobe_running();
-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-	if (!cur)
-		return 0;
-
-	resume_execution(cur, regs, kcb);
-	regs->flags |= kcb->kprobe_saved_flags;
-
-	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
-		kcb->kprobe_status = KPROBE_HIT_SSDONE;
-		cur->post_handler(cur, regs, 0);
-	}
-
-	/* Restore back the original saved kprobes variables and continue. */
-	if (kcb->kprobe_status == KPROBE_REENTER) {
-		restore_previous_kprobe(kcb);
-		goto out;
-	}
-	reset_current_kprobe();
-out:
-	/*
-	 * if somebody else is singlestepping across a probe point, flags
-	 * will have TF set, in which case, continue the remaining processing
-	 * of do_debug, as if this is not a probe hit.
-	 */
-	if (regs->flags & X86_EFLAGS_TF)
-		return 0;
-
-	return 1;
-}
-NOKPROBE_SYMBOL(kprobe_debug_handler);
-
 int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
	struct kprobe *cur = kprobe_running();
@@ -912,20 +1091,9 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
		 * normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;
-		/*
-		 * Trap flag (TF) has been set here because this fault
-		 * happened where the single stepping will be done.
-		 * So clear it by resetting the current kprobe:
-		 */
-		regs->flags &= ~X86_EFLAGS_TF;
-		/*
-		 * Since the single step (trap) has been cancelled,
-		 * we need to restore BTF here.
-		 */
-		restore_btf();
 
		/*
-		 * If the TF flag was set before the kprobe hit,
+		 * If the IF flag was set before the kprobe hit,
		 * don't touch it:
		 */
		regs->flags |= kcb->kprobe_old_flags;
@@ -312,6 +312,8 @@ static int can_optimize(unsigned long paddr)
 	addr = paddr - offset;
 	while (addr < paddr - offset + size) { /* Decode until function end */
 		unsigned long recovered_insn;
+		int ret;
 
 		if (search_exception_tables(addr))
 			/*
 			 * Since some fixup code will jumps into this function,
@@ -321,8 +323,11 @@ static int can_optimize(unsigned long paddr)
 		recovered_insn = recover_probed_instruction(buf, addr);
 		if (!recovered_insn)
 			return 0;
-		kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
-		insn_get_length(&insn);
+		ret = insn_decode_kernel(&insn, (void *)recovered_insn);
+		if (ret < 0)
+			return 0;
 
 		/*
 		 * In the case of detecting unknown breakpoint, this could be
 		 * a padding INT3 between functions. Let's check that all the
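
For reference while reading these conversions: insn_decode_kernel() is the one-shot decoder helper this merge introduces; the insn.h hunk near the end of this diff defines it as

	#define insn_decode_kernel(_insn, _ptr) insn_decode((_insn), (_ptr), MAX_INSN_SIZE, INSN_MODE_KERN)

so a single call replaces the old kernel_insn_init() + insn_get_length() sequence and returns a negative value when decoding fails.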
@@ -822,7 +822,6 @@ void __init setup_arch(char **cmdline_p)
 
 	idt_setup_early_traps();
 	early_cpu_init();
-	arch_init_ideal_nops();
 	jump_label_init();
 	static_call_init();
 	early_ioremap_init();
@@ -224,7 +224,6 @@ void __init setup_per_cpu_areas(void)
 		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
 		per_cpu(cpu_number, cpu) = cpu;
 		setup_percpu_segment(cpu);
-		setup_stack_canary_segment(cpu);
 		/*
 		 * Copy data used in early init routines from the
 		 * initial arrays to the per cpu data areas. These
@@ -263,39 +263,54 @@ static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
 	return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
 }
 
-static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
+static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt)
 {
 	char buffer[MAX_INSN_SIZE];
-	enum es_result ret;
 	int res;
 
-	if (user_mode(ctxt->regs)) {
-		res = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
-		if (!res) {
-			ctxt->fi.vector     = X86_TRAP_PF;
-			ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
-			ctxt->fi.cr2        = ctxt->regs->ip;
-			return ES_EXCEPTION;
-		}
-
-		if (!insn_decode(&ctxt->insn, ctxt->regs, buffer, res))
-			return ES_DECODE_FAILED;
-	} else {
-		res = vc_fetch_insn_kernel(ctxt, buffer);
-		if (res) {
-			ctxt->fi.vector     = X86_TRAP_PF;
-			ctxt->fi.error_code = X86_PF_INSTR;
-			ctxt->fi.cr2        = ctxt->regs->ip;
-			return ES_EXCEPTION;
-		}
-
-		insn_init(&ctxt->insn, buffer, MAX_INSN_SIZE, 1);
-		insn_get_length(&ctxt->insn);
+	res = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
+	if (!res) {
+		ctxt->fi.vector     = X86_TRAP_PF;
+		ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
+		ctxt->fi.cr2        = ctxt->regs->ip;
+		return ES_EXCEPTION;
 	}
 
-	ret = ctxt->insn.immediate.got ? ES_OK : ES_DECODE_FAILED;
+	if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, res))
+		return ES_DECODE_FAILED;
 
-	return ret;
+	if (ctxt->insn.immediate.got)
+		return ES_OK;
+	else
+		return ES_DECODE_FAILED;
+}
+
+static enum es_result __vc_decode_kern_insn(struct es_em_ctxt *ctxt)
+{
+	char buffer[MAX_INSN_SIZE];
+	int res, ret;
+
+	res = vc_fetch_insn_kernel(ctxt, buffer);
+	if (res) {
+		ctxt->fi.vector     = X86_TRAP_PF;
+		ctxt->fi.error_code = X86_PF_INSTR;
+		ctxt->fi.cr2        = ctxt->regs->ip;
+		return ES_EXCEPTION;
+	}
+
+	ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
+	if (ret < 0)
+		return ES_DECODE_FAILED;
+	else
+		return ES_OK;
+}
+
+static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
+{
+	if (user_mode(ctxt->regs))
+		return __vc_decode_user_insn(ctxt);
+	else
+		return __vc_decode_kern_insn(ctxt);
 }
 
 static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
@@ -458,47 +458,12 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 	return false;
 }
 
-/*
- * Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs.
- *
- * These are Intel CPUs that enumerate an LLC that is shared by
- * multiple NUMA nodes. The LLC on these systems is shared for
- * off-package data access but private to the NUMA node (half
- * of the package) for on-package access.
- *
- * CPUID (the source of the information about the LLC) can only
- * enumerate the cache as being shared *or* unshared, but not
- * this particular configuration. The CPU in this case enumerates
- * the cache to be shared across the entire package (spanning both
- * NUMA nodes).
- */
-
-static const struct x86_cpu_id snc_cpu[] = {
-	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
-	{}
-};
-
-static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
-	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
-
-	/* Do not match if we do not have a valid APICID for cpu: */
-	if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
-		return false;
-
-	/* Do not match if LLC id does not match: */
-	if (per_cpu(cpu_llc_id, cpu1) != per_cpu(cpu_llc_id, cpu2))
-		return false;
-
-	/*
-	 * Allow the SNC topology without warning. Return of false
-	 * means 'c' does not share the LLC of 'o'. This will be
-	 * reflected to userspace.
-	 */
-	if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu))
-		return false;
-
-	return topology_sane(c, o, "llc");
+	if (c->phys_proc_id == o->phys_proc_id &&
+	    c->cpu_die_id == o->cpu_die_id)
+		return true;
+	return false;
 }
 
 /*
@@ -513,12 +478,50 @@ static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 	return false;
 }
 
-static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+/*
+ * Define intel_cod_cpu[] for Intel COD (Cluster-on-Die) CPUs.
+ *
+ * Any Intel CPU that has multiple nodes per package and does not
+ * match intel_cod_cpu[] has the SNC (Sub-NUMA Cluster) topology.
+ *
+ * When in SNC mode, these CPUs enumerate an LLC that is shared
+ * by multiple NUMA nodes. The LLC is shared for off-package data
+ * access but private to the NUMA node (half of the package) for
+ * on-package access. CPUID (the source of the information about
+ * the LLC) can only enumerate the cache as shared or unshared,
+ * but not this particular configuration.
+ */
+
+static const struct x86_cpu_id intel_cod_cpu[] = {
+	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, 0),	/* COD */
+	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, 0),	/* COD */
+	X86_MATCH_INTEL_FAM6_MODEL(ANY, 1),		/* SNC */
+	{}
+};
+
+static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
-	if ((c->phys_proc_id == o->phys_proc_id) &&
-	    (c->cpu_die_id == o->cpu_die_id))
-		return true;
-	return false;
+	const struct x86_cpu_id *id = x86_match_cpu(intel_cod_cpu);
+	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
+	bool intel_snc = id && id->driver_data;
+
+	/* Do not match if we do not have a valid APICID for cpu: */
+	if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
+		return false;
+
+	/* Do not match if LLC id does not match: */
+	if (per_cpu(cpu_llc_id, cpu1) != per_cpu(cpu_llc_id, cpu2))
+		return false;
+
+	/*
+	 * Allow the SNC topology without warning. Return of false
+	 * means 'c' does not share the LLC of 'o'. This will be
+	 * reflected to userspace.
+	 */
+	if (match_pkg(c, o) && !topology_same_node(c, o) && intel_snc)
+		return false;
+
+	return topology_sane(c, o, "llc");
 }
 
 
@@ -592,14 +595,23 @@ void set_cpu_sibling_map(int cpu)
 	for_each_cpu(i, cpu_sibling_setup_mask) {
 		o = &cpu_data(i);
 
+		if (match_pkg(c, o) && !topology_same_node(c, o))
+			x86_has_numa_in_package = true;
+
 		if ((i == cpu) || (has_smt && match_smt(c, o)))
 			link_mask(topology_sibling_cpumask, cpu, i);
 
 		if ((i == cpu) || (has_mp && match_llc(c, o)))
 			link_mask(cpu_llc_shared_mask, cpu, i);
 
+		if ((i == cpu) || (has_mp && match_die(c, o)))
+			link_mask(topology_die_cpumask, cpu, i);
 	}
 
+	threads = cpumask_weight(topology_sibling_cpumask(cpu));
+	if (threads > __max_smt_threads)
+		__max_smt_threads = threads;
+
 	/*
 	 * This needs a separate iteration over the cpus because we rely on all
 	 * topology_sibling_cpumask links to be set-up.
@@ -613,8 +625,7 @@ void set_cpu_sibling_map(int cpu)
 		/*
 		 * Does this new cpu bringup a new core?
 		 */
-		if (cpumask_weight(
-		    topology_sibling_cpumask(cpu)) == 1) {
+		if (threads == 1) {
 			/*
 			 * for each core in package, increment
 			 * the booted_cores for this new cpu
@@ -631,16 +642,7 @@ void set_cpu_sibling_map(int cpu)
 			} else if (i != cpu && !c->booted_cores)
 				c->booted_cores = cpu_data(i).booted_cores;
 		}
-		if (match_pkg(c, o) && !topology_same_node(c, o))
-			x86_has_numa_in_package = true;
-
-		if ((i == cpu) || (has_mp && match_die(c, o)))
-			link_mask(topology_die_cpumask, cpu, i);
 	}
-
-	threads = cpumask_weight(topology_sibling_cpumask(cpu));
-	if (threads > __max_smt_threads)
-		__max_smt_threads = threads;
 }
 
 /* maps the cpu to the sched domain representing multi-core */
@@ -34,7 +34,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type, void
 		break;
 
 	case NOP:
-		code = ideal_nops[NOP_ATOMIC5];
+		code = x86_nops[5];
 		break;
 
 	case JMP:
@@ -66,7 +66,7 @@ static void __static_call_validate(void *insn, bool tail)
 			return;
 	} else {
 		if (opcode == CALL_INSN_OPCODE ||
-		    !memcmp(insn, ideal_nops[NOP_ATOMIC5], 5) ||
+		    !memcmp(insn, x86_nops[5], 5) ||
 		    !memcmp(insn, xor5rax, 5))
 			return;
 	}
@@ -164,17 +164,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
 		savesegment(fs, sel);
 		if (sel == modified_sel)
 			loadsegment(fs, sel);
+#endif
 
 		savesegment(gs, sel);
 		if (sel == modified_sel)
 			load_gs_index(sel);
-#endif
-
-#ifdef CONFIG_X86_32_LAZY_GS
-		savesegment(gs, sel);
-		if (sel == modified_sel)
-			loadsegment(gs, sel);
-#endif
 	} else {
 #ifdef CONFIG_X86_64
 		if (p->thread.fsindex == modified_sel)
@@ -498,14 +498,15 @@ static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs,
 {
 	u8 insn_buf[MAX_INSN_SIZE];
 	struct insn insn;
+	int ret;
 
 	if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip,
 			MAX_INSN_SIZE))
 		return GP_NO_HINT;
 
-	kernel_insn_init(&insn, insn_buf, MAX_INSN_SIZE);
-	insn_get_modrm(&insn);
-	insn_get_sib(&insn);
+	ret = insn_decode_kernel(&insn, insn_buf);
+	if (ret < 0)
+		return GP_NO_HINT;
 
 	*addr = (unsigned long)insn_get_addr_ref(&insn, regs);
 	if (*addr == -1UL)
@@ -889,9 +890,6 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
 	if ((dr6 & DR_STEP) && is_sysenter_singlestep(regs))
 		dr6 &= ~DR_STEP;
 
-	if (kprobe_debug_handler(regs))
-		goto out;
-
 	/*
 	 * The kernel doesn't use INT1
 	 */
@@ -356,7 +356,7 @@ bool fixup_umip_exception(struct pt_regs *regs)
 	if (!nr_copied)
 		return false;
 
-	if (!insn_decode(&insn, regs, buf, nr_copied))
+	if (!insn_decode_from_regs(&insn, regs, buf, nr_copied))
 		return false;
 
 	umip_inst = identify_insn(&insn);
@@ -276,12 +276,12 @@ static bool is_prefix_bad(struct insn *insn)
 
 static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool x86_64)
 {
+	enum insn_mode m = x86_64 ? INSN_MODE_64 : INSN_MODE_32;
 	u32 volatile *good_insns;
+	int ret;
 
-	insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64);
-	/* has the side-effect of processing the entire instruction */
-	insn_get_length(insn);
-	if (!insn_complete(insn))
+	ret = insn_decode(insn, auprobe->insn, sizeof(auprobe->insn), m);
+	if (ret < 0)
 		return -ENOEXEC;
 
 	if (is_prefix_bad(insn))
@@ -4,7 +4,7 @@
 *
 * Written by Masami Hiramatsu <mhiramat@redhat.com>
 */
-#include <asm/insn.h>
+#include <asm/insn.h> /* __ignore_sync_check__ */
 
 /* Attribute tables are generated from opcode map */
 #include "inat-tables.c"
@@ -404,10 +404,6 @@ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx)
 	case INAT_SEG_REG_FS:
 		return (unsigned short)(regs->fs & 0xffff);
 	case INAT_SEG_REG_GS:
-		/*
-		 * GS may or may not be in regs as per CONFIG_X86_32_LAZY_GS.
-		 * The macro below takes care of both cases.
-		 */
 		return get_user_gs(regs);
 	case INAT_SEG_REG_IGNORE:
 	default:
@@ -928,10 +924,11 @@ static int get_seg_base_limit(struct insn *insn, struct pt_regs *regs,
 static int get_eff_addr_reg(struct insn *insn, struct pt_regs *regs,
 			    int *regoff, long *eff_addr)
 {
-	insn_get_modrm(insn);
+	int ret;
 
-	if (!insn->modrm.nbytes)
-		return -EINVAL;
+	ret = insn_get_modrm(insn);
+	if (ret)
+		return ret;
 
 	if (X86_MODRM_MOD(insn->modrm.value) != 3)
 		return -EINVAL;
@@ -977,14 +974,14 @@ static int get_eff_addr_modrm(struct insn *insn, struct pt_regs *regs,
 			      int *regoff, long *eff_addr)
 {
 	long tmp;
+	int ret;
 
 	if (insn->addr_bytes != 8 && insn->addr_bytes != 4)
 		return -EINVAL;
 
-	insn_get_modrm(insn);
-
-	if (!insn->modrm.nbytes)
-		return -EINVAL;
+	ret = insn_get_modrm(insn);
+	if (ret)
+		return ret;
 
 	if (X86_MODRM_MOD(insn->modrm.value) > 2)
 		return -EINVAL;
@@ -1106,18 +1103,21 @@ static int get_eff_addr_modrm_16(struct insn *insn, struct pt_regs *regs,
 * @base_offset will have a register, as an offset from the base of pt_regs,
 * that can be used to resolve the associated segment.
 *
- * -EINVAL on error.
+ * Negative value on error.
 */
 static int get_eff_addr_sib(struct insn *insn, struct pt_regs *regs,
 			    int *base_offset, long *eff_addr)
 {
 	long base, indx;
 	int indx_offset;
+	int ret;
 
 	if (insn->addr_bytes != 8 && insn->addr_bytes != 4)
 		return -EINVAL;
 
-	insn_get_modrm(insn);
+	ret = insn_get_modrm(insn);
+	if (ret)
+		return ret;
 
 	if (!insn->modrm.nbytes)
 		return -EINVAL;
@@ -1125,7 +1125,9 @@ static int get_eff_addr_sib(struct insn *insn, struct pt_regs *regs,
 	if (X86_MODRM_MOD(insn->modrm.value) > 2)
 		return -EINVAL;
 
-	insn_get_sib(insn);
+	ret = insn_get_sib(insn);
+	if (ret)
+		return ret;
 
 	if (!insn->sib.nbytes)
 		return -EINVAL;
@@ -1194,8 +1196,8 @@ static void __user *get_addr_ref_16(struct insn *insn, struct pt_regs *regs)
 	short eff_addr;
 	long tmp;
 
-	insn_get_modrm(insn);
-	insn_get_displacement(insn);
+	if (insn_get_displacement(insn))
+		goto out;
 
 	if (insn->addr_bytes != 2)
 		goto out;
@@ -1492,7 +1494,7 @@ int insn_fetch_from_user_inatomic(struct pt_regs *regs, unsigned char buf[MAX_IN
 }
 
 /**
- * insn_decode() - Decode an instruction
+ * insn_decode_from_regs() - Decode an instruction
 * @insn:	Structure to store decoded instruction
 * @regs:	Structure with register values as seen when entering kernel mode
 * @buf:	Buffer containing the instruction bytes
@@ -1505,8 +1507,8 @@ int insn_fetch_from_user_inatomic(struct pt_regs *regs, unsigned char buf[MAX_IN
 *
 * True if instruction was decoded, False otherwise.
 */
-bool insn_decode(struct insn *insn, struct pt_regs *regs,
+bool insn_decode_from_regs(struct insn *insn, struct pt_regs *regs,
 		 unsigned char buf[MAX_INSN_SIZE], int buf_size)
 {
 	int seg_defs;
 
@@ -1529,7 +1531,9 @@ bool insn_decode(struct insn *insn, struct pt_regs *regs,
 	insn->addr_bytes = INSN_CODE_SEG_ADDR_SZ(seg_defs);
 	insn->opnd_bytes = INSN_CODE_SEG_OPND_SZ(seg_defs);
 
-	insn_get_length(insn);
+	if (insn_get_length(insn))
+		return false;
+
 	if (buf_size < insn->length)
 		return false;
 
@@ -11,10 +11,13 @@
 #else
 #include <string.h>
 #endif
-#include <asm/inat.h>
-#include <asm/insn.h>
+#include <asm/inat.h> /*__ignore_sync_check__ */
+#include <asm/insn.h> /* __ignore_sync_check__ */
 
-#include <asm/emulate_prefix.h>
+#include <linux/errno.h>
+#include <linux/kconfig.h>
+
+#include <asm/emulate_prefix.h> /* __ignore_sync_check__ */
 
 #define leXX_to_cpu(t, r)						\
 ({									\
@@ -51,6 +54,7 @@
 * insn_init() - initialize struct insn
 * @insn:	&struct insn to be initialized
 * @kaddr:	address (in kernel memory) of instruction (or copy thereof)
+ * @buf_len:	length of the insn buffer at @kaddr
 * @x86_64:	!0 for 64-bit kernel or 64-bit app
 */
 void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
@@ -111,8 +115,12 @@ static void insn_get_emulate_prefix(struct insn *insn)
 * Populates the @insn->prefixes bitmap, and updates @insn->next_byte
 * to point to the (first) opcode.  No effect if @insn->prefixes.got
 * is already set.
+ *
+ * * Returns:
+ * 0:  on success
+ * < 0: on error
 */
-void insn_get_prefixes(struct insn *insn)
+int insn_get_prefixes(struct insn *insn)
 {
 	struct insn_field *prefixes = &insn->prefixes;
 	insn_attr_t attr;
@@ -120,7 +128,7 @@ void insn_get_prefixes(struct insn *insn)
 	int i, nb;
 
 	if (prefixes->got)
-		return;
+		return 0;
 
 	insn_get_emulate_prefix(insn);
 
@@ -230,8 +238,10 @@ vex_end:
 
 	prefixes->got = 1;
 
+	return 0;
+
 err_out:
-	return;
+	return -ENODATA;
 }
 
 /**
@@ -243,16 +253,25 @@ err_out:
 * If necessary, first collects any preceding (prefix) bytes.
 * Sets @insn->opcode.value = opcode1.  No effect if @insn->opcode.got
 * is already 1.
+ *
+ * Returns:
+ * 0:  on success
+ * < 0: on error
 */
-void insn_get_opcode(struct insn *insn)
+int insn_get_opcode(struct insn *insn)
 {
 	struct insn_field *opcode = &insn->opcode;
+	int pfx_id, ret;
 	insn_byte_t op;
-	int pfx_id;
+
 	if (opcode->got)
-		return;
-	if (!insn->prefixes.got)
-		insn_get_prefixes(insn);
+		return 0;
+
+	if (!insn->prefixes.got) {
+		ret = insn_get_prefixes(insn);
+		if (ret)
+			return ret;
+	}
 
 	/* Get first opcode */
 	op = get_next(insn_byte_t, insn);
@@ -267,9 +286,13 @@ void insn_get_opcode(struct insn *insn)
 		insn->attr = inat_get_avx_attribute(op, m, p);
 		if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) ||
 		    (!inat_accept_vex(insn->attr) &&
-		     !inat_is_group(insn->attr)))
-			insn->attr = 0;	/* This instruction is bad */
-		goto end;	/* VEX has only 1 byte for opcode */
+		     !inat_is_group(insn->attr))) {
+			/* This instruction is bad */
+			insn->attr = 0;
+			return -EINVAL;
+		}
+		/* VEX has only 1 byte for opcode */
+		goto end;
 	}
 
 	insn->attr = inat_get_opcode_attribute(op);
@@ -280,13 +303,18 @@ void insn_get_opcode(struct insn *insn)
 		pfx_id = insn_last_prefix_id(insn);
 		insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr);
 	}
-	if (inat_must_vex(insn->attr))
-		insn->attr = 0;	/* This instruction is bad */
+
+	if (inat_must_vex(insn->attr)) {
+		/* This instruction is bad */
+		insn->attr = 0;
+		return -EINVAL;
+	}
 end:
 	opcode->got = 1;
+	return 0;
 
 err_out:
-	return;
+	return -ENODATA;
 }
 
 /**
@@ -296,15 +324,25 @@ err_out:
 * Populates @insn->modrm and updates @insn->next_byte to point past the
 * ModRM byte, if any.  If necessary, first collects the preceding bytes
 * (prefixes and opcode(s)).  No effect if @insn->modrm.got is already 1.
+ *
+ * Returns:
+ * 0:  on success
+ * < 0: on error
 */
-void insn_get_modrm(struct insn *insn)
+int insn_get_modrm(struct insn *insn)
 {
 	struct insn_field *modrm = &insn->modrm;
 	insn_byte_t pfx_id, mod;
+	int ret;
+
 	if (modrm->got)
-		return;
-	if (!insn->opcode.got)
-		insn_get_opcode(insn);
+		return 0;
+
+	if (!insn->opcode.got) {
+		ret = insn_get_opcode(insn);
+		if (ret)
+			return ret;
+	}
 
 	if (inat_has_modrm(insn->attr)) {
 		mod = get_next(insn_byte_t, insn);
@@ -313,17 +351,22 @@ void insn_get_modrm(struct insn *insn)
 			pfx_id = insn_last_prefix_id(insn);
 			insn->attr = inat_get_group_attribute(mod, pfx_id,
 							      insn->attr);
-			if (insn_is_avx(insn) && !inat_accept_vex(insn->attr))
-				insn->attr = 0;	/* This is bad */
+			if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) {
+				/* Bad insn */
+				insn->attr = 0;
+				return -EINVAL;
+			}
 		}
 	}
 
 	if (insn->x86_64 && inat_is_force64(insn->attr))
 		insn->opnd_bytes = 8;
 
 	modrm->got = 1;
+	return 0;
 
 err_out:
-	return;
+	return -ENODATA;
 }
 
 
@@ -337,11 +380,16 @@ err_out:
 int insn_rip_relative(struct insn *insn)
 {
 	struct insn_field *modrm = &insn->modrm;
+	int ret;
 
 	if (!insn->x86_64)
 		return 0;
-	if (!modrm->got)
-		insn_get_modrm(insn);
+
+	if (!modrm->got) {
+		ret = insn_get_modrm(insn);
+		if (ret)
+			return 0;
+	}
 	/*
 	 * For rip-relative instructions, the mod field (top 2 bits)
 	 * is zero and the r/m field (bottom 3 bits) is 0x5.
@@ -355,15 +403,25 @@ int insn_rip_relative(struct insn *insn)
 *
 * If necessary, first collects the instruction up to and including the
 * ModRM byte.
+ *
+ * Returns:
+ * 0: if decoding succeeded
+ * < 0: otherwise.
 */
-void insn_get_sib(struct insn *insn)
+int insn_get_sib(struct insn *insn)
 {
 	insn_byte_t modrm;
+	int ret;
 
 	if (insn->sib.got)
-		return;
-	if (!insn->modrm.got)
-		insn_get_modrm(insn);
+		return 0;
+
+	if (!insn->modrm.got) {
+		ret = insn_get_modrm(insn);
+		if (ret)
+			return ret;
+	}
 
 	if (insn->modrm.nbytes) {
 		modrm = insn->modrm.bytes[0];
 		if (insn->addr_bytes != 2 &&
@@ -374,8 +432,10 @@ void insn_get_sib(struct insn *insn)
 	}
 	insn->sib.got = 1;
 
+	return 0;
+
 err_out:
-	return;
+	return -ENODATA;
 }
 
 
@@ -386,15 +446,25 @@ err_out:
 * If necessary, first collects the instruction up to and including the
 * SIB byte.
 * Displacement value is sign-expanded.
+ *
+ * * Returns:
+ * 0: if decoding succeeded
+ * < 0: otherwise.
 */
-void insn_get_displacement(struct insn *insn)
+int insn_get_displacement(struct insn *insn)
 {
 	insn_byte_t mod, rm, base;
+	int ret;
 
 	if (insn->displacement.got)
-		return;
-	if (!insn->sib.got)
-		insn_get_sib(insn);
+		return 0;
+
+	if (!insn->sib.got) {
+		ret = insn_get_sib(insn);
+		if (ret)
+			return ret;
+	}
 
 	if (insn->modrm.nbytes) {
 		/*
 		 * Interpreting the modrm byte:
@@ -436,9 +506,10 @@ void insn_get_displacement(struct insn *insn)
 	}
 out:
 	insn->displacement.got = 1;
+	return 0;
 
 err_out:
-	return;
+	return -ENODATA;
 }
 
 /* Decode moffset16/32/64. Return 0 if failed */
@@ -537,20 +608,30 @@ err_out:
 }
 
 /**
- * insn_get_immediate() - Get the immediates of instruction
+ * insn_get_immediate() - Get the immediate in an instruction
 * @insn:	&struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * displacement bytes.
 * Basically, most of immediates are sign-expanded. Unsigned-value can be
- * get by bit masking with ((1 << (nbytes * 8)) - 1)
+ * computed by bit masking with ((1 << (nbytes * 8)) - 1)
+ *
+ * Returns:
+ * 0:  on success
+ * < 0: on error
 */
-void insn_get_immediate(struct insn *insn)
+int insn_get_immediate(struct insn *insn)
 {
+	int ret;
+
 	if (insn->immediate.got)
-		return;
-	if (!insn->displacement.got)
-		insn_get_displacement(insn);
+		return 0;
+
+	if (!insn->displacement.got) {
+		ret = insn_get_displacement(insn);
+		if (ret)
+			return ret;
+	}
 
 	if (inat_has_moffset(insn->attr)) {
 		if (!__get_moffset(insn))
@@ -597,9 +678,10 @@ void insn_get_immediate(struct insn *insn)
 	}
 done:
 	insn->immediate.got = 1;
+	return 0;
 
 err_out:
-	return;
+	return -ENODATA;
 }
 
 /**
@@ -608,13 +690,65 @@ err_out:
 *
 * If necessary, first collects the instruction up to and including the
 * immediates bytes.
- */
-void insn_get_length(struct insn *insn)
+ *
+ * Returns:
+ *  - 0 on success
+ *  - < 0 on error
+*/
+int insn_get_length(struct insn *insn)
 {
+	int ret;
+
 	if (insn->length)
-		return;
-	if (!insn->immediate.got)
-		insn_get_immediate(insn);
+		return 0;
+
+	if (!insn->immediate.got) {
+		ret = insn_get_immediate(insn);
+		if (ret)
+			return ret;
+	}
+
 	insn->length = (unsigned char)((unsigned long)insn->next_byte
 				     - (unsigned long)insn->kaddr);
+
+	return 0;
+}
+
+/* Ensure this instruction is decoded completely */
+static inline int insn_complete(struct insn *insn)
+{
+	return insn->opcode.got && insn->modrm.got && insn->sib.got &&
+		insn->displacement.got && insn->immediate.got;
+}
+
+/**
+ * insn_decode() - Decode an x86 instruction
+ * @insn:	&struct insn to be initialized
+ * @kaddr:	address (in kernel memory) of instruction (or copy thereof)
+ * @buf_len:	length of the insn buffer at @kaddr
+ * @m:		insn mode, see enum insn_mode
+ *
+ * Returns:
+ * 0: if decoding succeeded
+ * < 0: otherwise.
+ */
+int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m)
+{
+	int ret;
+
+/* #define INSN_MODE_KERN	-1 __ignore_sync_check__ mode is only valid in the kernel */
+
+	if (m == INSN_MODE_KERN)
+		insn_init(insn, kaddr, buf_len, IS_ENABLED(CONFIG_X86_64));
+	else
+		insn_init(insn, kaddr, buf_len, m == INSN_MODE_64);
+
+	ret = insn_get_length(insn);
+	if (ret)
+		return ret;
+
+	if (insn_complete(insn))
+		return 0;
+
+	return -EINVAL;
 }
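
The insn_decode() entry point added above is what the rest of this merge converts callers to. A minimal usage sketch (illustrative only; insn_len_at() is an invented name, not a kernel function):

	#include <asm/insn.h>

	/* Hypothetical helper: return the length of the instruction at @kaddr. */
	static int insn_len_at(const void *kaddr)
	{
		struct insn insn;
		int ret;

		/* Decodes prefixes through immediates in one call; < 0 on error. */
		ret = insn_decode_kernel(&insn, kaddr);
		if (ret < 0)
			return ret;

		return insn.length;
	}

Callers decoding a non-kernel-mode buffer pass INSN_MODE_32 or INSN_MODE_64 to insn_decode() directly, as the uprobes and tools hunks in this diff do.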
@@ -10,27 +10,59 @@
 #include <asm/unwind_hints.h>
 #include <asm/frame.h>
 
-.macro THUNK reg
 	.section .text.__x86.indirect_thunk
 
-	.align 32
-SYM_FUNC_START(__x86_indirect_thunk_\reg)
-	JMP_NOSPEC \reg
-SYM_FUNC_END(__x86_indirect_thunk_\reg)
-
-SYM_FUNC_START_NOALIGN(__x86_retpoline_\reg)
+.macro RETPOLINE reg
 	ANNOTATE_INTRA_FUNCTION_CALL
 	call	.Ldo_rop_\@
.Lspec_trap_\@:
 	UNWIND_HINT_EMPTY
 	pause
 	lfence
 	jmp	.Lspec_trap_\@
.Ldo_rop_\@:
 	mov	%\reg, (%_ASM_SP)
 	UNWIND_HINT_FUNC
 	ret
-SYM_FUNC_END(__x86_retpoline_\reg)
+.endm
+
+.macro THUNK reg
+
+	.align 32
+
+SYM_FUNC_START(__x86_indirect_thunk_\reg)
+
+	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
+		      __stringify(RETPOLINE \reg), X86_FEATURE_RETPOLINE, \
+		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_AMD
+
+SYM_FUNC_END(__x86_indirect_thunk_\reg)
+
+.endm
+
+/*
+ * This generates .altinstr_replacement symbols for use by objtool. They,
+ * however, must not actually live in .altinstr_replacement since that will be
+ * discarded after init, but module alternatives will also reference these
+ * symbols.
+ *
+ * Their names matches the "__x86_indirect_" prefix to mark them as retpolines.
+ */
+.macro ALT_THUNK reg
+
+	.align 1
+
+SYM_FUNC_START_NOALIGN(__x86_indirect_alt_call_\reg)
+	ANNOTATE_RETPOLINE_SAFE
+1:	call	*%\reg
+2:	.skip	5-(2b-1b), 0x90
+SYM_FUNC_END(__x86_indirect_alt_call_\reg)
+
+SYM_FUNC_START_NOALIGN(__x86_indirect_alt_jmp_\reg)
+	ANNOTATE_RETPOLINE_SAFE
+1:	jmp	*%\reg
+2:	.skip	5-(2b-1b), 0x90
+SYM_FUNC_END(__x86_indirect_alt_jmp_\reg)
+
 .endm
 
@@ -48,7 +80,6 @@ SYM_FUNC_END(__x86_retpoline_\reg)
 
 #define __EXPORT_THUNK(sym)	_ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)
 #define EXPORT_THUNK(reg)	__EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
-#define EXPORT_RETPOLINE(reg)	__EXPORT_THUNK(__x86_retpoline_ ## reg)
 
 #undef GEN
 #define GEN(reg) THUNK reg
@@ -59,5 +90,13 @@ SYM_FUNC_END(__x86_retpoline_\reg)
 #include <asm/GEN-for-each-reg.h>
 
 #undef GEN
-#define GEN(reg) EXPORT_RETPOLINE(reg)
+#define GEN(reg) ALT_THUNK reg
+#include <asm/GEN-for-each-reg.h>
+
+#undef GEN
+#define GEN(reg) __EXPORT_THUNK(__x86_indirect_alt_call_ ## reg)
+#include <asm/GEN-for-each-reg.h>
+
+#undef GEN
+#define GEN(reg) __EXPORT_THUNK(__x86_indirect_alt_jmp_ ## reg)
 #include <asm/GEN-for-each-reg.h>
@@ -282,7 +282,7 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
 	/* BPF trampoline can be made to work without these nops,
 	 * but let's waste 5 bytes for now and optimize later
 	 */
-	memcpy(prog, ideal_nops[NOP_ATOMIC5], cnt);
+	memcpy(prog, x86_nops[5], cnt);
 	prog += cnt;
 	if (!ebpf_from_cbpf) {
 		if (tail_call_reachable && !is_subprog)
@@ -330,7 +330,7 @@ static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
 			      void *old_addr, void *new_addr,
 			      const bool text_live)
 {
-	const u8 *nop_insn = ideal_nops[NOP_ATOMIC5];
+	const u8 *nop_insn = x86_nops[5];
 	u8 old_insn[X86_PATCH_SIZE];
 	u8 new_insn[X86_PATCH_SIZE];
 	u8 *prog;
@@ -560,7 +560,7 @@ static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
 	if (stack_depth)
 		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
 
-	memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
+	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
 	prog += X86_PATCH_SIZE;
 	/* out: */
 
@@ -881,7 +881,7 @@ static int emit_nops(u8 **pprog, int len)
 			noplen = ASM_NOP_MAX;
 
 		for (i = 0; i < noplen; i++)
-			EMIT1(ideal_nops[noplen][i]);
+			EMIT1(x86_nops[noplen][i]);
 		len -= noplen;
 	}
 
@@ -2021,7 +2021,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 		/* remember return value in a stack for bpf prog to access */
 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
 		im->ip_after_call = prog;
-		memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
+		memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
 		prog += X86_PATCH_SIZE;
 	}
 
@@ -46,10 +46,8 @@
 
 #define PVH_GDT_ENTRY_CS	1
 #define PVH_GDT_ENTRY_DS	2
-#define PVH_GDT_ENTRY_CANARY	3
 #define PVH_CS_SEL		(PVH_GDT_ENTRY_CS * 8)
 #define PVH_DS_SEL		(PVH_GDT_ENTRY_DS * 8)
-#define PVH_CANARY_SEL		(PVH_GDT_ENTRY_CANARY * 8)
 
 SYM_CODE_START_LOCAL(pvh_start_xen)
 	cld
@@ -111,17 +109,6 @@ SYM_CODE_START_LOCAL(pvh_start_xen)
 
 #else /* CONFIG_X86_64 */
 
-	/* Set base address in stack canary descriptor. */
-	movl $_pa(gdt_start),%eax
-	movl $_pa(canary),%ecx
-	movw %cx, (PVH_GDT_ENTRY_CANARY * 8) + 2(%eax)
-	shrl $16, %ecx
-	movb %cl, (PVH_GDT_ENTRY_CANARY * 8) + 4(%eax)
-	movb %ch, (PVH_GDT_ENTRY_CANARY * 8) + 7(%eax)
-
-	mov $PVH_CANARY_SEL,%eax
-	mov %eax,%gs
-
 	call mk_early_pgtbl_32
 
 	mov $_pa(initial_page_table), %eax
@@ -165,7 +152,6 @@ SYM_DATA_START_LOCAL(gdt_start)
 	.quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* PVH_CS_SEL */
 #endif
 	.quad GDT_ENTRY(0xc092, 0, 0xfffff) /* PVH_DS_SEL */
-	.quad GDT_ENTRY(0x4090, 0, 0x18)    /* PVH_CANARY_SEL */
 SYM_DATA_END_LABEL(gdt_start, SYM_L_LOCAL, gdt_end)
 
 	.balign 16
@@ -99,11 +99,8 @@ static void __save_processor_state(struct saved_context *ctxt)
 	/*
 	 * segment registers
 	 */
-#ifdef CONFIG_X86_32_LAZY_GS
 	savesegment(gs, ctxt->gs);
-#endif
 #ifdef CONFIG_X86_64
-	savesegment(gs, ctxt->gs);
 	savesegment(fs, ctxt->fs);
 	savesegment(ds, ctxt->ds);
 	savesegment(es, ctxt->es);
@@ -232,7 +229,6 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
 	wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
 #else
 	loadsegment(fs, __KERNEL_PERCPU);
-	loadsegment(gs, __KERNEL_STACK_CANARY);
 #endif
 
 	/* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
@@ -255,7 +251,7 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
 	 */
 	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
 	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
-#elif defined(CONFIG_X86_32_LAZY_GS)
+#else
 	loadsegment(gs, ctxt->gs);
 #endif
 
@@ -120,7 +120,7 @@ int main(int argc, char **argv)
 
 	while (fgets(line, BUFSIZE, stdin)) {
 		char copy[BUFSIZE], *s, *tab1, *tab2;
-		int nb = 0;
+		int nb = 0, ret;
 		unsigned int b;
 
 		if (line[0] == '<') {
@@ -148,10 +148,12 @@ int main(int argc, char **argv)
 			} else
 				break;
 		}
 
 		/* Decode an instruction */
-		insn_init(&insn, insn_buff, sizeof(insn_buff), x86_64);
-		insn_get_length(&insn);
-		if (insn.length != nb) {
+		ret = insn_decode(&insn, insn_buff, sizeof(insn_buff),
+				  x86_64 ? INSN_MODE_64 : INSN_MODE_32);
+
+		if (ret < 0 || insn.length != nb) {
 			warnings++;
 			pr_warn("Found an x86 instruction decoder bug, "
 				"please report this.\n", sym);
@@ -218,8 +218,8 @@ static void parse_args(int argc, char **argv)
 
 int main(int argc, char **argv)
 {
+	int insns = 0, ret;
 	struct insn insn;
-	int insns = 0;
 	int errors = 0;
 	unsigned long i;
 	unsigned char insn_buff[MAX_INSN_SIZE * 2];
@@ -237,15 +237,15 @@ int main(int argc, char **argv)
 			continue;
 
 		/* Decode an instruction */
-		insn_init(&insn, insn_buff, sizeof(insn_buff), x86_64);
-		insn_get_length(&insn);
+		ret = insn_decode(&insn, insn_buff, sizeof(insn_buff),
+				  x86_64 ? INSN_MODE_64 : INSN_MODE_32);
 
 		if (insn.next_byte <= insn.kaddr ||
 		    insn.kaddr + MAX_INSN_SIZE < insn.next_byte) {
 			/* Access out-of-range memory */
 			dump_stream(stderr, "Error: Found an access violation", i, insn_buff, &insn);
 			errors++;
-		} else if (verbose && !insn_complete(&insn))
+		} else if (verbose && ret < 0)
 			dump_stream(stdout, "Info: Found an undecodable input", i, insn_buff, &insn);
 		else if (verbose >= 2)
 			dump_insn(stdout, &insn);
@@ -1202,7 +1202,6 @@ static void __init xen_setup_gdt(int cpu)
 	pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry_boot;
 	pv_ops.cpu.load_gdt = xen_load_gdt_boot;
 
-	setup_stack_canary_segment(cpu);
 	switch_to_new_gdt(cpu);
 
 	pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry;
@@ -1,4 +1,8 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 
-echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -m32 -O0 -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+# This requires GCC 8.1 or better. Specifically, we require
+# -mstack-protector-guard-reg, added by
+# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81708
+
+echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -m32 -O0 -fstack-protector -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard - -o - 2> /dev/null | grep -q "%fs"
@@ -6,7 +6,7 @@
 *
 * Written by Masami Hiramatsu <mhiramat@redhat.com>
 */
-#include "inat_types.h"
+#include "inat_types.h" /* __ignore_sync_check__ */
 
 /*
 * Internal bits. Don't use bitmasks directly, because these bits are
@@ -9,7 +9,7 @@
 
 #include <asm/byteorder.h>
 /* insn_attr_t is defined in inat.h */
-#include "inat.h"
+#include "inat.h" /* __ignore_sync_check__ */
 
 #if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
 
@@ -132,13 +132,25 @@ struct insn {
 #define X86_VEX_M_MAX	0x1f			/* VEX3.M Maximum value */
 
 extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64);
-extern void insn_get_prefixes(struct insn *insn);
-extern void insn_get_opcode(struct insn *insn);
-extern void insn_get_modrm(struct insn *insn);
-extern void insn_get_sib(struct insn *insn);
-extern void insn_get_displacement(struct insn *insn);
-extern void insn_get_immediate(struct insn *insn);
-extern void insn_get_length(struct insn *insn);
+extern int insn_get_prefixes(struct insn *insn);
+extern int insn_get_opcode(struct insn *insn);
+extern int insn_get_modrm(struct insn *insn);
+extern int insn_get_sib(struct insn *insn);
+extern int insn_get_displacement(struct insn *insn);
+extern int insn_get_immediate(struct insn *insn);
+extern int insn_get_length(struct insn *insn);
+
+enum insn_mode {
+	INSN_MODE_32,
+	INSN_MODE_64,
+	/* Mode is determined by the current kernel build. */
+	INSN_MODE_KERN,
+	INSN_NUM_MODES,
+};
+
+extern int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m);
+
+#define insn_decode_kernel(_insn, _ptr) insn_decode((_insn), (_ptr), MAX_INSN_SIZE, INSN_MODE_KERN)
 
 /* Attribute will be determined after getting ModRM (for opcode groups) */
 static inline void insn_get_attribute(struct insn *insn)
@@ -149,17 +161,6 @@ static inline void insn_get_attribute(struct insn *insn)
 /* Instruction uses RIP-relative addressing */
 extern int insn_rip_relative(struct insn *insn);
 
-/* Init insn for kernel text */
-static inline void kernel_insn_init(struct insn *insn,
-				    const void *kaddr, int buf_len)
-{
-#ifdef CONFIG_X86_64
-	insn_init(insn, kaddr, buf_len, 1);
-#else /* CONFIG_X86_32 */
-	insn_init(insn, kaddr, buf_len, 0);
-#endif
-}
-
 static inline int insn_is_avx(struct insn *insn)
 {
 	if (!insn->prefixes.got)
@@ -179,13 +180,6 @@ static inline int insn_has_emulate_prefix(struct insn *insn)
 	return !!insn->emulate_prefix_size;
 }
 
-/* Ensure this instruction is decoded completely */
-static inline int insn_complete(struct insn *insn)
-{
-	return insn->opcode.got && insn->modrm.got && insn->sib.got &&
-		insn->displacement.got && insn->immediate.got;
-}
-
 static inline insn_byte_t insn_vex_m_bits(struct insn *insn)
 {
 	if (insn->vex_prefix.nbytes == 2)	/* 2 bytes VEX */
81
tools/arch/x86/include/asm/nops.h
Normal file
81
tools/arch/x86/include/asm/nops.h
Normal file
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_NOPS_H
+#define _ASM_X86_NOPS_H
+
+/*
+ * Define nops for use with alternative() and for tracing.
+ */
+
+#ifndef CONFIG_64BIT
+
+/*
+ * Generic 32bit nops from GAS:
+ *
+ * 1: nop
+ * 2: movl %esi,%esi
+ * 3: leal 0x0(%esi),%esi
+ * 4: leal 0x0(%esi,%eiz,1),%esi
+ * 5: leal %ds:0x0(%esi,%eiz,1),%esi
+ * 6: leal 0x0(%esi),%esi
+ * 7: leal 0x0(%esi,%eiz,1),%esi
+ * 8: leal %ds:0x0(%esi,%eiz,1),%esi
+ *
+ * Except 5 and 8, which are DS prefixed 4 and 7 resp, where GAS would emit 2
+ * nop instructions.
+ */
+#define BYTES_NOP1	0x90
+#define BYTES_NOP2	0x89,0xf6
+#define BYTES_NOP3	0x8d,0x76,0x00
+#define BYTES_NOP4	0x8d,0x74,0x26,0x00
+#define BYTES_NOP5	0x3e,BYTES_NOP4
+#define BYTES_NOP6	0x8d,0xb6,0x00,0x00,0x00,0x00
+#define BYTES_NOP7	0x8d,0xb4,0x26,0x00,0x00,0x00,0x00
+#define BYTES_NOP8	0x3e,BYTES_NOP7
+
+#else
+
+/*
+ * Generic 64bit nops from GAS:
+ *
+ * 1: nop
+ * 2: osp nop
+ * 3: nopl (%eax)
+ * 4: nopl 0x00(%eax)
+ * 5: nopl 0x00(%eax,%eax,1)
+ * 6: osp nopl 0x00(%eax,%eax,1)
+ * 7: nopl 0x00000000(%eax)
+ * 8: nopl 0x00000000(%eax,%eax,1)
+ */
+#define BYTES_NOP1	0x90
+#define BYTES_NOP2	0x66,BYTES_NOP1
+#define BYTES_NOP3	0x0f,0x1f,0x00
+#define BYTES_NOP4	0x0f,0x1f,0x40,0x00
+#define BYTES_NOP5	0x0f,0x1f,0x44,0x00,0x00
+#define BYTES_NOP6	0x66,BYTES_NOP5
+#define BYTES_NOP7	0x0f,0x1f,0x80,0x00,0x00,0x00,0x00
+#define BYTES_NOP8	0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
+
+#endif /* CONFIG_64BIT */
+
+#ifdef __ASSEMBLY__
+#define _ASM_MK_NOP(x) .byte x
+#else
+#define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n"
+#endif
+
+#define ASM_NOP1 _ASM_MK_NOP(BYTES_NOP1)
+#define ASM_NOP2 _ASM_MK_NOP(BYTES_NOP2)
+#define ASM_NOP3 _ASM_MK_NOP(BYTES_NOP3)
+#define ASM_NOP4 _ASM_MK_NOP(BYTES_NOP4)
+#define ASM_NOP5 _ASM_MK_NOP(BYTES_NOP5)
+#define ASM_NOP6 _ASM_MK_NOP(BYTES_NOP6)
+#define ASM_NOP7 _ASM_MK_NOP(BYTES_NOP7)
+#define ASM_NOP8 _ASM_MK_NOP(BYTES_NOP8)
+
+#define ASM_NOP_MAX 8
+
+#ifndef __ASSEMBLY__
+extern const unsigned char * const x86_nops[];
+#endif
+
+#endif /* _ASM_X86_NOPS_H */
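As a usage sketch (assumed consumers, not part of this header): C code gets the byte lists stringified for inline asm via _ASM_MK_NOP, assembly files get a raw .byte directive, and the x86_nops[] table declared at the bottom is, in the kernel proper, indexed by NOP length:

	/* On 64-bit, ASM_NOP5 expands to ".byte 0x0f,0x1f,0x44,0x00,0x00\n". */
	asm volatile(ASM_NOP5);

	/* Assumed x86_nops[] convention: entry n points at an n-byte NOP. */
	const unsigned char *nop3 = x86_nops[3];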
@@ -4,7 +4,7 @@
  *
  * Written by Masami Hiramatsu <mhiramat@redhat.com>
  */
-#include "../include/asm/insn.h"
+#include "../include/asm/insn.h" /* __ignore_sync_check__ */
 
 /* Attribute tables are generated from opcode map */
 #include "inat-tables.c"
@@ -11,10 +11,13 @@
 #else
 #include <string.h>
 #endif
-#include "../include/asm/inat.h"
-#include "../include/asm/insn.h"
+#include "../include/asm/inat.h" /* __ignore_sync_check__ */
+#include "../include/asm/insn.h" /* __ignore_sync_check__ */
 
-#include "../include/asm/emulate_prefix.h"
+#include <linux/errno.h>
+#include <linux/kconfig.h>
+
+#include "../include/asm/emulate_prefix.h" /* __ignore_sync_check__ */
 
 #define leXX_to_cpu(t, r)						\
 ({									\
@@ -51,6 +54,7 @@
  * insn_init() - initialize struct insn
  * @insn:	&struct insn to be initialized
  * @kaddr:	address (in kernel memory) of instruction (or copy thereof)
+ * @buf_len:	length of the insn buffer at @kaddr
  * @x86_64:	!0 for 64-bit kernel or 64-bit app
  */
 void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
@@ -111,8 +115,12 @@ static void insn_get_emulate_prefix(struct insn *insn)
  * Populates the @insn->prefixes bitmap, and updates @insn->next_byte
  * to point to the (first) opcode.  No effect if @insn->prefixes.got
  * is already set.
+ *
+ * Returns:
+ * 0:  on success
+ * < 0: on error
  */
-void insn_get_prefixes(struct insn *insn)
+int insn_get_prefixes(struct insn *insn)
 {
 	struct insn_field *prefixes = &insn->prefixes;
 	insn_attr_t attr;
@@ -120,7 +128,7 @@ void insn_get_prefixes(struct insn *insn)
 	int i, nb;
 
 	if (prefixes->got)
-		return;
+		return 0;
 
 	insn_get_emulate_prefix(insn);
 
@@ -230,8 +238,10 @@ vex_end:
 
 	prefixes->got = 1;
 
+	return 0;
+
 err_out:
-	return;
+	return -ENODATA;
 }
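Every getter below follows this same conversion: check the cached ->got flag, pull in the prerequisite stage and propagate its error, and turn the err_out fall-through into -ENODATA. The full chain is prefixes -> opcode -> ModRM -> SIB -> displacement -> immediate -> length, so a single insn_get_length() call decodes everything. For instance, insn_get_modrm() (further down) now begins with:

	if (!insn->opcode.got) {
		ret = insn_get_opcode(insn);
		if (ret)
			return ret;
	}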
 
 /**
@@ -243,16 +253,25 @@ err_out:
  * If necessary, first collects any preceding (prefix) bytes.
  * Sets @insn->opcode.value = opcode1.  No effect if @insn->opcode.got
  * is already 1.
+ *
+ * Returns:
+ * 0:  on success
+ * < 0: on error
  */
-void insn_get_opcode(struct insn *insn)
+int insn_get_opcode(struct insn *insn)
 {
 	struct insn_field *opcode = &insn->opcode;
+	int pfx_id, ret;
 	insn_byte_t op;
-	int pfx_id;
 
 	if (opcode->got)
-		return;
+		return 0;
 
-	if (!insn->prefixes.got)
-		insn_get_prefixes(insn);
+	if (!insn->prefixes.got) {
+		ret = insn_get_prefixes(insn);
+		if (ret)
+			return ret;
+	}
 
 	/* Get first opcode */
 	op = get_next(insn_byte_t, insn);
@@ -267,9 +286,13 @@ void insn_get_opcode(struct insn *insn)
 		insn->attr = inat_get_avx_attribute(op, m, p);
 		if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) ||
 		    (!inat_accept_vex(insn->attr) &&
-		     !inat_is_group(insn->attr)))
-			insn->attr = 0;	/* This instruction is bad */
-		goto end;	/* VEX has only 1 byte for opcode */
+		     !inat_is_group(insn->attr))) {
+			/* This instruction is bad */
+			insn->attr = 0;
+			return -EINVAL;
+		}
+		/* VEX has only 1 byte for opcode */
+		goto end;
 	}
 
 	insn->attr = inat_get_opcode_attribute(op);
@@ -280,13 +303,18 @@ void insn_get_opcode(struct insn *insn)
 		pfx_id = insn_last_prefix_id(insn);
 		insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr);
 	}
-	if (inat_must_vex(insn->attr))
-		insn->attr = 0;	/* This instruction is bad */
+	if (inat_must_vex(insn->attr)) {
+		/* This instruction is bad */
+		insn->attr = 0;
+		return -EINVAL;
+	}
 end:
 	opcode->got = 1;
+	return 0;
 
 err_out:
-	return;
+	return -ENODATA;
 }
 
 /**
@@ -296,15 +324,25 @@ err_out:
  * Populates @insn->modrm and updates @insn->next_byte to point past the
  * ModRM byte, if any.  If necessary, first collects the preceding bytes
  * (prefixes and opcode(s)).  No effect if @insn->modrm.got is already 1.
+ *
+ * Returns:
+ * 0:  on success
+ * < 0: on error
  */
-void insn_get_modrm(struct insn *insn)
+int insn_get_modrm(struct insn *insn)
 {
 	struct insn_field *modrm = &insn->modrm;
 	insn_byte_t pfx_id, mod;
+	int ret;
 
 	if (modrm->got)
-		return;
+		return 0;
 
-	if (!insn->opcode.got)
-		insn_get_opcode(insn);
+	if (!insn->opcode.got) {
+		ret = insn_get_opcode(insn);
+		if (ret)
+			return ret;
+	}
 
 	if (inat_has_modrm(insn->attr)) {
 		mod = get_next(insn_byte_t, insn);
@@ -313,17 +351,22 @@ void insn_get_modrm(struct insn *insn)
 			pfx_id = insn_last_prefix_id(insn);
 			insn->attr = inat_get_group_attribute(mod, pfx_id,
 							      insn->attr);
-			if (insn_is_avx(insn) && !inat_accept_vex(insn->attr))
-				insn->attr = 0;	/* This is bad */
+			if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) {
+				/* Bad insn */
+				insn->attr = 0;
+				return -EINVAL;
+			}
 		}
 	}
 
 	if (insn->x86_64 && inat_is_force64(insn->attr))
 		insn->opnd_bytes = 8;
 
 	modrm->got = 1;
+	return 0;
 
 err_out:
-	return;
+	return -ENODATA;
 }
@@ -337,11 +380,16 @@ err_out:
 int insn_rip_relative(struct insn *insn)
 {
 	struct insn_field *modrm = &insn->modrm;
+	int ret;
 
 	if (!insn->x86_64)
 		return 0;
 
-	if (!modrm->got)
-		insn_get_modrm(insn);
+	if (!modrm->got) {
+		ret = insn_get_modrm(insn);
+		if (ret)
+			return 0;
+	}
 	/*
 	 * For rip-relative instructions, the mod field (top 2 bits)
 	 * is zero and the r/m field (bottom 3 bits) is 0x5.
@@ -355,15 +403,25 @@ int insn_rip_relative(struct insn *insn)
  *
  * If necessary, first collects the instruction up to and including the
  * ModRM byte.
+ *
+ * Returns:
+ * 0: if decoding succeeded
+ * < 0: otherwise.
  */
-void insn_get_sib(struct insn *insn)
+int insn_get_sib(struct insn *insn)
 {
 	insn_byte_t modrm;
+	int ret;
 
 	if (insn->sib.got)
-		return;
+		return 0;
 
-	if (!insn->modrm.got)
-		insn_get_modrm(insn);
+	if (!insn->modrm.got) {
+		ret = insn_get_modrm(insn);
+		if (ret)
+			return ret;
+	}
 
 	if (insn->modrm.nbytes) {
 		modrm = insn->modrm.bytes[0];
 		if (insn->addr_bytes != 2 &&
@@ -374,8 +432,10 @@ void insn_get_sib(struct insn *insn)
 	}
 	insn->sib.got = 1;
+
+	return 0;
 
 err_out:
-	return;
+	return -ENODATA;
 }
@@ -386,15 +446,25 @@ err_out:
  * If necessary, first collects the instruction up to and including the
  * SIB byte.
  * Displacement value is sign-expanded.
+ *
+ * Returns:
+ * 0: if decoding succeeded
+ * < 0: otherwise.
  */
-void insn_get_displacement(struct insn *insn)
+int insn_get_displacement(struct insn *insn)
 {
 	insn_byte_t mod, rm, base;
+	int ret;
 
 	if (insn->displacement.got)
-		return;
+		return 0;
 
-	if (!insn->sib.got)
-		insn_get_sib(insn);
+	if (!insn->sib.got) {
+		ret = insn_get_sib(insn);
+		if (ret)
+			return ret;
+	}
 
 	if (insn->modrm.nbytes) {
 		/*
 		 * Interpreting the modrm byte:
@@ -436,9 +506,10 @@ void insn_get_displacement(struct insn *insn)
 	}
 out:
 	insn->displacement.got = 1;
+	return 0;
 
 err_out:
-	return;
+	return -ENODATA;
 }
 
 /* Decode moffset16/32/64. Return 0 if failed */
@@ -537,20 +608,30 @@ err_out:
 }
 
 /**
- * insn_get_immediate() - Get the immediates of instruction
+ * insn_get_immediate() - Get the immediate in an instruction
  * @insn:	&struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * displacement bytes.
 * Basically, most of immediates are sign-expanded. Unsigned-value can be
- * get by bit masking with ((1 << (nbytes * 8)) - 1)
+ * computed by bit masking with ((1 << (nbytes * 8)) - 1)
+ *
+ * Returns:
+ * 0:  on success
+ * < 0: on error
 */
-void insn_get_immediate(struct insn *insn)
+int insn_get_immediate(struct insn *insn)
 {
+	int ret;
+
 	if (insn->immediate.got)
-		return;
+		return 0;
 
-	if (!insn->displacement.got)
-		insn_get_displacement(insn);
+	if (!insn->displacement.got) {
+		ret = insn_get_displacement(insn);
+		if (ret)
+			return ret;
+	}
 
 	if (inat_has_moffset(insn->attr)) {
 		if (!__get_moffset(insn))
@@ -597,9 +678,10 @@ void insn_get_immediate(struct insn *insn)
 	}
 done:
 	insn->immediate.got = 1;
+	return 0;
 
 err_out:
-	return;
+	return -ENODATA;
 }
 
 /**
@@ -608,13 +690,65 @@ err_out:
 *
 * If necessary, first collects the instruction up to and including the
 * immediates bytes.
+ *
+ * Returns:
+ * - 0 on success
+ * - < 0 on error
 */
-void insn_get_length(struct insn *insn)
+int insn_get_length(struct insn *insn)
 {
+	int ret;
+
 	if (insn->length)
-		return;
+		return 0;
 
-	if (!insn->immediate.got)
-		insn_get_immediate(insn);
+	if (!insn->immediate.got) {
+		ret = insn_get_immediate(insn);
+		if (ret)
+			return ret;
+	}
 
 	insn->length = (unsigned char)((unsigned long)insn->next_byte
 				     - (unsigned long)insn->kaddr);
+
+	return 0;
+}
+
+/* Ensure this instruction is decoded completely */
+static inline int insn_complete(struct insn *insn)
+{
+	return insn->opcode.got && insn->modrm.got && insn->sib.got &&
+		insn->displacement.got && insn->immediate.got;
+}
+
+/**
+ * insn_decode() - Decode an x86 instruction
+ * @insn:	&struct insn to be initialized
+ * @kaddr:	address (in kernel memory) of instruction (or copy thereof)
+ * @buf_len:	length of the insn buffer at @kaddr
+ * @m:		insn mode, see enum insn_mode
+ *
+ * Returns:
+ * 0: if decoding succeeded
+ * < 0: otherwise.
+ */
+int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m)
+{
+	int ret;
+
+#define INSN_MODE_KERN	(enum insn_mode)-1 /* __ignore_sync_check__ mode is only valid in the kernel */
+
+	if (m == INSN_MODE_KERN)
+		insn_init(insn, kaddr, buf_len, IS_ENABLED(CONFIG_X86_64));
+	else
+		insn_init(insn, kaddr, buf_len, m == INSN_MODE_64);
+
+	ret = insn_get_length(insn);
+	if (ret)
+		return ret;
+
+	if (insn_complete(insn))
+		return 0;
+
+	return -EINVAL;
+}
tools/include/linux/kconfig.h
Normal file
73
tools/include/linux/kconfig.h
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
/* SPDX-License-Identifier: GPL-2.0 */
|
||||||
|
#ifndef _TOOLS_LINUX_KCONFIG_H
|
||||||
|
#define _TOOLS_LINUX_KCONFIG_H
|
||||||
|
|
||||||
|
/* CONFIG_CC_VERSION_TEXT (Do not delete this comment. See help in Kconfig) */
|
||||||
|
|
||||||
|
#ifdef CONFIG_CPU_BIG_ENDIAN
|
||||||
|
#define __BIG_ENDIAN 4321
|
||||||
|
#else
|
||||||
|
#define __LITTLE_ENDIAN 1234
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define __ARG_PLACEHOLDER_1 0,
|
||||||
|
#define __take_second_arg(__ignored, val, ...) val
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The use of "&&" / "||" is limited in certain expressions.
|
||||||
|
* The following enable to calculate "and" / "or" with macro expansion only.
|
||||||
|
*/
|
||||||
|
#define __and(x, y) ___and(x, y)
|
||||||
|
#define ___and(x, y) ____and(__ARG_PLACEHOLDER_##x, y)
|
||||||
|
#define ____and(arg1_or_junk, y) __take_second_arg(arg1_or_junk y, 0)
|
||||||
|
|
||||||
|
#define __or(x, y) ___or(x, y)
|
||||||
|
#define ___or(x, y) ____or(__ARG_PLACEHOLDER_##x, y)
|
||||||
|
#define ____or(arg1_or_junk, y) __take_second_arg(arg1_or_junk 1, y)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Helper macros to use CONFIG_ options in C/CPP expressions. Note that
|
||||||
|
* these only work with boolean and tristate options.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Getting something that works in C and CPP for an arg that may or may
|
||||||
|
* not be defined is tricky. Here, if we have "#define CONFIG_BOOGER 1"
|
||||||
|
* we match on the placeholder define, insert the "0," for arg1 and generate
|
||||||
|
* the triplet (0, 1, 0). Then the last step cherry picks the 2nd arg (a one).
|
||||||
|
* When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when
|
||||||
|
* the last step cherry picks the 2nd arg, we get a zero.
|
||||||
|
*/
|
||||||
|
#define __is_defined(x) ___is_defined(x)
|
||||||
|
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
|
||||||
|
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0
|
||||||
|
* otherwise. For boolean options, this is equivalent to
|
||||||
|
* IS_ENABLED(CONFIG_FOO).
|
||||||
|
*/
|
||||||
|
#define IS_BUILTIN(option) __is_defined(option)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
|
||||||
|
* otherwise.
|
||||||
|
*/
|
||||||
|
#define IS_MODULE(option) __is_defined(option##_MODULE)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled
|
||||||
|
* code can call a function defined in code compiled based on CONFIG_FOO.
|
||||||
|
* This is similar to IS_ENABLED(), but returns false when invoked from
|
||||||
|
* built-in code when CONFIG_FOO is set to 'm'.
|
||||||
|
*/
|
||||||
|
#define IS_REACHABLE(option) __or(IS_BUILTIN(option), \
|
||||||
|
__and(IS_MODULE(option), __is_defined(MODULE)))
|
||||||
|
|
||||||
|
/*
|
||||||
|
* IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
|
||||||
|
* 0 otherwise.
|
||||||
|
*/
|
||||||
|
#define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option))
|
||||||
|
|
||||||
|
#endif /* _TOOLS_LINUX_KCONFIG_H */
|
||||||
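Tracing one expansion by hand shows how the placeholder trick works (this walkthrough is implied by the macros above, written out for clarity):

	/*
	 * With "#define CONFIG_FOO 1":
	 *   IS_BUILTIN(CONFIG_FOO)
	 *     -> ___is_defined(1)
	 *     -> ____is_defined(__ARG_PLACEHOLDER_1)   placeholder expands to "0,"
	 *     -> __take_second_arg(0, 1, 0)             second argument is 1
	 *     -> 1
	 *
	 * With CONFIG_FOO undefined:
	 *   IS_BUILTIN(CONFIG_FOO)
	 *     -> ____is_defined(__ARG_PLACEHOLDER_CONFIG_FOO)   not a macro, stays junk
	 *     -> __take_second_arg(__ARG_PLACEHOLDER_CONFIG_FOO 1, 0)
	 *     -> 0
	 */
#if IS_ENABLED(CONFIG_FOO)
	/* ... compiled only when CONFIG_FOO is y or m ... */
#endif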
@@ -11,11 +11,15 @@
 #include "../../../arch/x86/lib/inat.c"
 #include "../../../arch/x86/lib/insn.c"
 
+#define CONFIG_64BIT 1
+#include <asm/nops.h>
+
 #include <asm/orc_types.h>
 #include <objtool/check.h>
 #include <objtool/elf.h>
 #include <objtool/arch.h>
 #include <objtool/warn.h>
+#include <arch/elf.h>
 
 static unsigned char op_to_cfi_reg[][2] = {
 	{CFI_AX, CFI_R8},
@@ -90,7 +94,7 @@ int arch_decode_instruction(const struct elf *elf, const struct section *sec,
 			    struct list_head *ops_list)
 {
 	struct insn insn;
-	int x86_64, sign;
+	int x86_64, sign, ret;
 	unsigned char op1, op2, rex = 0, rex_b = 0, rex_r = 0, rex_w = 0,
 		      rex_x = 0, modrm = 0, modrm_mod = 0, modrm_rm = 0,
 		      modrm_reg = 0, sib = 0;
@@ -101,10 +105,9 @@ int arch_decode_instruction(const struct elf *elf, const struct section *sec,
 	if (x86_64 == -1)
 		return -1;
 
-	insn_init(&insn, sec->data->d_buf + offset, maxlen, x86_64);
-	insn_get_length(&insn);
-
-	if (!insn_complete(&insn)) {
+	ret = insn_decode(&insn, sec->data->d_buf + offset, maxlen,
+			  x86_64 ? INSN_MODE_64 : INSN_MODE_32);
+	if (ret < 0) {
 		WARN("can't decode instruction at %s:0x%lx", sec->name, offset);
 		return -1;
 	}
@@ -596,11 +599,11 @@ void arch_initial_func_cfi_state(struct cfi_init_state *state)
 const char *arch_nop_insn(int len)
 {
 	static const char nops[5][5] = {
-		/* 1 */ { 0x90 },
-		/* 2 */ { 0x66, 0x90 },
-		/* 3 */ { 0x0f, 0x1f, 0x00 },
-		/* 4 */ { 0x0f, 0x1f, 0x40, 0x00 },
-		/* 5 */ { 0x0f, 0x1f, 0x44, 0x00, 0x00 },
+		{ BYTES_NOP1 },
+		{ BYTES_NOP2 },
+		{ BYTES_NOP3 },
+		{ BYTES_NOP4 },
+		{ BYTES_NOP5 },
 	};
 
 	if (len < 1 || len > 5) {
@@ -611,6 +614,122 @@ const char *arch_nop_insn(int len)
 	return nops[len-1];
 }
 
+/* asm/alternative.h ? */
+
+#define ALTINSTR_FLAG_INV	(1 << 15)
+#define ALT_NOT(feat)		((feat) | ALTINSTR_FLAG_INV)
+
+struct alt_instr {
+	s32 instr_offset;	/* original instruction */
+	s32 repl_offset;	/* offset to replacement instruction */
+	u16 cpuid;		/* cpuid bit set for replacement */
+	u8  instrlen;		/* length of original instruction */
+	u8  replacementlen;	/* length of new instruction */
+} __packed;
+
+static int elf_add_alternative(struct elf *elf,
+			       struct instruction *orig, struct symbol *sym,
+			       int cpuid, u8 orig_len, u8 repl_len)
+{
+	const int size = sizeof(struct alt_instr);
+	struct alt_instr *alt;
+	struct section *sec;
+	Elf_Scn *s;
+
+	sec = find_section_by_name(elf, ".altinstructions");
+	if (!sec) {
+		sec = elf_create_section(elf, ".altinstructions",
+					 SHF_WRITE, size, 0);
+
+		if (!sec) {
+			WARN_ELF("elf_create_section");
+			return -1;
+		}
+	}
+
+	s = elf_getscn(elf->elf, sec->idx);
+	if (!s) {
+		WARN_ELF("elf_getscn");
+		return -1;
+	}
+
+	sec->data = elf_newdata(s);
+	if (!sec->data) {
+		WARN_ELF("elf_newdata");
+		return -1;
+	}
+
+	sec->data->d_size = size;
+	sec->data->d_align = 1;
+
+	alt = sec->data->d_buf = malloc(size);
+	if (!sec->data->d_buf) {
+		perror("malloc");
+		return -1;
+	}
+	memset(sec->data->d_buf, 0, size);
+
+	if (elf_add_reloc_to_insn(elf, sec, sec->sh.sh_size,
+				  R_X86_64_PC32, orig->sec, orig->offset)) {
+		WARN("elf_create_reloc: alt_instr::instr_offset");
+		return -1;
+	}
+
+	if (elf_add_reloc(elf, sec, sec->sh.sh_size + 4,
+			  R_X86_64_PC32, sym, 0)) {
+		WARN("elf_create_reloc: alt_instr::repl_offset");
+		return -1;
+	}
+
+	alt->cpuid = cpuid;
+	alt->instrlen = orig_len;
+	alt->replacementlen = repl_len;
+
+	sec->sh.sh_size += size;
+	sec->changed = true;
+
+	return 0;
+}
+
+#define X86_FEATURE_RETPOLINE ( 7*32+12)
+
+int arch_rewrite_retpolines(struct objtool_file *file)
+{
+	struct instruction *insn;
+	struct reloc *reloc;
+	struct symbol *sym;
+	char name[32] = "";
+
+	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
+
+		if (!strcmp(insn->sec->name, ".text.__x86.indirect_thunk"))
+			continue;
+
+		reloc = insn->reloc;
+
+		sprintf(name, "__x86_indirect_alt_%s_%s",
+			insn->type == INSN_JUMP_DYNAMIC ? "jmp" : "call",
+			reloc->sym->name + 21);
+
+		sym = find_symbol_by_name(file->elf, name);
+		if (!sym) {
+			sym = elf_create_undef_symbol(file->elf, name);
+			if (!sym) {
+				WARN("elf_create_undef_symbol");
+				return -1;
+			}
+		}
+
+		if (elf_add_alternative(file->elf, insn, sym,
+					ALT_NOT(X86_FEATURE_RETPOLINE), 5, 5)) {
+			WARN("elf_add_alternative");
+			return -1;
+		}
+	}
+
+	return 0;
+}
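Concretely (example values): "__x86_indirect_thunk_" is exactly 21 characters, which is what the `reloc->sym->name + 21` above strips, so a call through the %rax thunk gets paired with an alternative symbol like this:

	/* reloc->sym->name == "__x86_indirect_thunk_rax", insn->type == INSN_CALL */
	sprintf(name, "__x86_indirect_alt_%s_%s", "call", "rax");
	/* name == "__x86_indirect_alt_call_rax" */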
 
 int arch_decode_hint_reg(struct instruction *insn, u8 sp_reg)
 {
 	struct cfi_reg *cfa = &insn->cfi.cfa;
@@ -646,3 +765,8 @@ int arch_decode_hint_reg(struct instruction *insn, u8 sp_reg)
 
 	return 0;
 }
+
+bool arch_is_retpoline(struct symbol *sym)
+{
+	return !strncmp(sym->name, "__x86_indirect_", 15);
+}
@@ -10,7 +10,7 @@
 #define JUMP_ORIG_OFFSET	0
 #define JUMP_NEW_OFFSET		4
 
-#define ALT_ENTRY_SIZE		13
+#define ALT_ENTRY_SIZE		12
 #define ALT_ORIG_OFFSET		0
 #define ALT_NEW_OFFSET		4
 #define ALT_FEATURE_OFFSET	8
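The corrected value matches the packed struct alt_instr that decode.c now emits: s32 + s32 + u16 + u8 + u8 = 12 bytes. A compile-time check along these lines (hypothetical, not part of the patch) would tie the two together:

	_Static_assert(sizeof(struct alt_instr) == ALT_ENTRY_SIZE,
		       "ALT_ENTRY_SIZE must match struct alt_instr");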
@@ -433,8 +433,7 @@ reachable:
 
 static int create_static_call_sections(struct objtool_file *file)
 {
-	struct section *sec, *reloc_sec;
-	struct reloc *reloc;
+	struct section *sec;
 	struct static_call_site *site;
 	struct instruction *insn;
 	struct symbol *key_sym;
@@ -452,7 +451,7 @@ static int create_static_call_sections(struct objtool_file *file)
 		return 0;
 
 	idx = 0;
-	list_for_each_entry(insn, &file->static_call_list, static_call_node)
+	list_for_each_entry(insn, &file->static_call_list, call_node)
 		idx++;
 
 	sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
@@ -460,36 +459,18 @@ static int create_static_call_sections(struct objtool_file *file)
 	if (!sec)
 		return -1;
 
-	reloc_sec = elf_create_reloc_section(file->elf, sec, SHT_RELA);
-	if (!reloc_sec)
-		return -1;
-
 	idx = 0;
-	list_for_each_entry(insn, &file->static_call_list, static_call_node) {
+	list_for_each_entry(insn, &file->static_call_list, call_node) {
 
 		site = (struct static_call_site *)sec->data->d_buf + idx;
 		memset(site, 0, sizeof(struct static_call_site));
 
 		/* populate reloc for 'addr' */
-		reloc = malloc(sizeof(*reloc));
-		if (!reloc) {
-			perror("malloc");
-			return -1;
-		}
-		memset(reloc, 0, sizeof(*reloc));
-
-		insn_to_reloc_sym_addend(insn->sec, insn->offset, reloc);
-		if (!reloc->sym) {
-			WARN_FUNC("static call tramp: missing containing symbol",
-				  insn->sec, insn->offset);
-			return -1;
-		}
-
-		reloc->type = R_X86_64_PC32;
-		reloc->offset = idx * sizeof(struct static_call_site);
-		reloc->sec = reloc_sec;
-		elf_add_reloc(file->elf, reloc);
+		if (elf_add_reloc_to_insn(file->elf, sec,
+					  idx * sizeof(struct static_call_site),
+					  R_X86_64_PC32,
+					  insn->sec, insn->offset))
+			return -1;
 
 		/* find key symbol */
 		key_name = strdup(insn->call_dest->name);
@@ -526,32 +507,21 @@ static int create_static_call_sections(struct objtool_file *file)
 		free(key_name);
 
 		/* populate reloc for 'key' */
-		reloc = malloc(sizeof(*reloc));
-		if (!reloc) {
-			perror("malloc");
-			return -1;
-		}
-		memset(reloc, 0, sizeof(*reloc));
-		reloc->sym = key_sym;
-		reloc->addend = is_sibling_call(insn) ? STATIC_CALL_SITE_TAIL : 0;
-		reloc->type = R_X86_64_PC32;
-		reloc->offset = idx * sizeof(struct static_call_site) + 4;
-		reloc->sec = reloc_sec;
-		elf_add_reloc(file->elf, reloc);
+		if (elf_add_reloc(file->elf, sec,
+				  idx * sizeof(struct static_call_site) + 4,
+				  R_X86_64_PC32, key_sym,
+				  is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
+			return -1;
 
 		idx++;
 	}
 
-	if (elf_rebuild_reloc_section(file->elf, reloc_sec))
-		return -1;
-
 	return 0;
 }
 
 static int create_mcount_loc_sections(struct objtool_file *file)
 {
-	struct section *sec, *reloc_sec;
-	struct reloc *reloc;
+	struct section *sec;
 	unsigned long *loc;
 	struct instruction *insn;
 	int idx;
@@ -574,49 +544,21 @@ static int create_mcount_loc_sections(struct objtool_file *file)
 	if (!sec)
 		return -1;
 
-	reloc_sec = elf_create_reloc_section(file->elf, sec, SHT_RELA);
-	if (!reloc_sec)
-		return -1;
-
 	idx = 0;
 	list_for_each_entry(insn, &file->mcount_loc_list, mcount_loc_node) {
 
 		loc = (unsigned long *)sec->data->d_buf + idx;
 		memset(loc, 0, sizeof(unsigned long));
 
-		reloc = malloc(sizeof(*reloc));
-		if (!reloc) {
-			perror("malloc");
-			return -1;
-		}
-		memset(reloc, 0, sizeof(*reloc));
-
-		if (insn->sec->sym) {
-			reloc->sym = insn->sec->sym;
-			reloc->addend = insn->offset;
-		} else {
-			reloc->sym = find_symbol_containing(insn->sec, insn->offset);
-
-			if (!reloc->sym) {
-				WARN("missing symbol for insn at offset 0x%lx\n",
-				     insn->offset);
-				return -1;
-			}
-
-			reloc->addend = insn->offset - reloc->sym->offset;
-		}
-
-		reloc->type = R_X86_64_64;
-		reloc->offset = idx * sizeof(unsigned long);
-		reloc->sec = reloc_sec;
-		elf_add_reloc(file->elf, reloc);
+		if (elf_add_reloc_to_insn(file->elf, sec,
+					  idx * sizeof(unsigned long),
+					  R_X86_64_64,
+					  insn->sec, insn->offset))
+			return -1;
 
 		idx++;
 	}
 
-	if (elf_rebuild_reloc_section(file->elf, reloc_sec))
-		return -1;
-
 	return 0;
 }
@@ -850,6 +792,30 @@ static int add_ignore_alternatives(struct objtool_file *file)
 	return 0;
 }
 
+__weak bool arch_is_retpoline(struct symbol *sym)
+{
+	return false;
+}
+
+#define NEGATIVE_RELOC	((void *)-1L)
+
+static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
+{
+	if (insn->reloc == NEGATIVE_RELOC)
+		return NULL;
+
+	if (!insn->reloc) {
+		insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec,
+						       insn->offset, insn->len);
+		if (!insn->reloc) {
+			insn->reloc = NEGATIVE_RELOC;
+			return NULL;
+		}
+	}
+
+	return insn->reloc;
+}
+
 /*
  * Find the destination instructions for all jumps.
  */
@@ -864,16 +830,14 @@ static int add_jump_destinations(struct objtool_file *file)
 		if (!is_static_jump(insn))
 			continue;
 
-		reloc = find_reloc_by_dest_range(file->elf, insn->sec,
-						 insn->offset, insn->len);
+		reloc = insn_reloc(file, insn);
 		if (!reloc) {
 			dest_sec = insn->sec;
 			dest_off = arch_jump_destination(insn);
 		} else if (reloc->sym->type == STT_SECTION) {
 			dest_sec = reloc->sym->sec;
 			dest_off = arch_dest_reloc_offset(reloc->addend);
-		} else if (!strncmp(reloc->sym->name, "__x86_indirect_thunk_", 21) ||
-			   !strncmp(reloc->sym->name, "__x86_retpoline_", 16)) {
+		} else if (arch_is_retpoline(reloc->sym)) {
 			/*
 			 * Retpoline jumps are really dynamic jumps in
 			 * disguise, so convert them accordingly.
@@ -883,13 +847,16 @@ static int add_jump_destinations(struct objtool_file *file)
 			else
 				insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
 
+			list_add_tail(&insn->call_node,
+				      &file->retpoline_call_list);
+
 			insn->retpoline_safe = true;
 			continue;
 		} else if (insn->func) {
 			/* internal or external sibling call (with reloc) */
 			insn->call_dest = reloc->sym;
 			if (insn->call_dest->static_call_tramp) {
-				list_add_tail(&insn->static_call_node,
+				list_add_tail(&insn->call_node,
 					      &file->static_call_list);
 			}
 			continue;
@@ -951,7 +918,7 @@ static int add_jump_destinations(struct objtool_file *file)
 			/* internal sibling call (without reloc) */
 			insn->call_dest = insn->jump_dest->func;
 			if (insn->call_dest->static_call_tramp) {
-				list_add_tail(&insn->static_call_node,
+				list_add_tail(&insn->call_node,
 					      &file->static_call_list);
 			}
 		}
@@ -995,8 +962,7 @@ static int add_call_destinations(struct objtool_file *file)
 		if (insn->type != INSN_CALL)
 			continue;
 
-		reloc = find_reloc_by_dest_range(file->elf, insn->sec,
-						 insn->offset, insn->len);
+		reloc = insn_reloc(file, insn);
 		if (!reloc) {
 			dest_off = arch_jump_destination(insn);
 			insn->call_dest = find_call_destination(insn->sec, dest_off);
@@ -1026,9 +992,29 @@ static int add_call_destinations(struct objtool_file *file)
 				  dest_off);
 			return -1;
 		}
+
+		} else if (arch_is_retpoline(reloc->sym)) {
+			/*
+			 * Retpoline calls are really dynamic calls in
+			 * disguise, so convert them accordingly.
+			 */
+			insn->type = INSN_CALL_DYNAMIC;
+			insn->retpoline_safe = true;
+
+			list_add_tail(&insn->call_node,
+				      &file->retpoline_call_list);
+
+			remove_insn_ops(insn);
+			continue;
+
 		} else
 			insn->call_dest = reloc->sym;
 
+		if (insn->call_dest && insn->call_dest->static_call_tramp) {
+			list_add_tail(&insn->call_node,
+				      &file->static_call_list);
+		}
+
 		/*
 		 * Many compilers cannot disable KCOV with a function attribute
 		 * so they need a little help, NOP out any KCOV calls from noinstr
@@ -1175,8 +1161,7 @@ static int handle_group_alt(struct objtool_file *file,
 	 * alternatives code can adjust the relative offsets
 	 * accordingly.
 	 */
-	alt_reloc = find_reloc_by_dest_range(file->elf, insn->sec,
-					     insn->offset, insn->len);
+	alt_reloc = insn_reloc(file, insn);
 	if (alt_reloc &&
 	    !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {
 
@@ -1751,6 +1736,11 @@ static void mark_rodata(struct objtool_file *file)
 	file->rodata = found;
 }
 
+__weak int arch_rewrite_retpolines(struct objtool_file *file)
+{
+	return 0;
+}
+
 static int decode_sections(struct objtool_file *file)
 {
 	int ret;
@@ -1772,10 +1762,17 @@ static int decode_sections(struct objtool_file *file)
 	if (ret)
 		return ret;
 
+	/*
+	 * Must be before add_{jump,call}_destination().
+	 */
 	ret = read_static_call_tramps(file);
 	if (ret)
 		return ret;
 
+	/*
+	 * Must be before add_special_section_alts() as that depends on
+	 * jump_dest being set.
+	 */
 	ret = add_jump_destinations(file);
 	if (ret)
 		return ret;
@@ -1784,6 +1781,10 @@ static int decode_sections(struct objtool_file *file)
 	if (ret)
 		return ret;
 
+	/*
+	 * Must be before add_call_destination(); it changes INSN_CALL to
+	 * INSN_JUMP.
+	 */
 	ret = read_intra_function_calls(file);
 	if (ret)
 		return ret;
@@ -1808,6 +1809,15 @@ static int decode_sections(struct objtool_file *file)
 	if (ret)
 		return ret;
 
+	/*
+	 * Must be after add_special_section_alts(), since this will emit
+	 * alternatives. Must be after add_{jump,call}_destination(), since
+	 * those create the call insn lists.
+	 */
+	ret = arch_rewrite_retpolines(file);
+	if (ret)
+		return ret;
+
 	return 0;
 }
@@ -2746,11 +2756,6 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
 			if (dead_end_function(file, insn->call_dest))
 				return 0;
 
-			if (insn->type == INSN_CALL && insn->call_dest->static_call_tramp) {
-				list_add_tail(&insn->static_call_node,
-					      &file->static_call_list);
-			}
-
 			break;
 
 		case INSN_JUMP_CONDITIONAL:
@@ -211,32 +211,6 @@ struct reloc *find_reloc_by_dest(const struct elf *elf, struct section *sec, uns
 	return find_reloc_by_dest_range(elf, sec, offset, 1);
 }
 
-void insn_to_reloc_sym_addend(struct section *sec, unsigned long offset,
-			      struct reloc *reloc)
-{
-	if (sec->sym) {
-		reloc->sym = sec->sym;
-		reloc->addend = offset;
-		return;
-	}
-
-	/*
-	 * The Clang assembler strips section symbols, so we have to reference
-	 * the function symbol instead:
-	 */
-	reloc->sym = find_symbol_containing(sec, offset);
-	if (!reloc->sym) {
-		/*
-		 * Hack alert. This happens when we need to reference the NOP
-		 * pad insn immediately after the function.
-		 */
-		reloc->sym = find_symbol_containing(sec, offset - 1);
-	}
-
-	if (reloc->sym)
-		reloc->addend = offset - reloc->sym->offset;
-}
-
 static int read_sections(struct elf *elf)
 {
 	Elf_Scn *s = NULL;
@@ -316,12 +290,39 @@ static int read_sections(struct elf *elf)
 	return 0;
 }
 
+static void elf_add_symbol(struct elf *elf, struct symbol *sym)
+{
+	struct list_head *entry;
+	struct rb_node *pnode;
+
+	sym->type = GELF_ST_TYPE(sym->sym.st_info);
+	sym->bind = GELF_ST_BIND(sym->sym.st_info);
+
+	sym->offset = sym->sym.st_value;
+	sym->len = sym->sym.st_size;
+
+	rb_add(&sym->node, &sym->sec->symbol_tree, symbol_to_offset);
+	pnode = rb_prev(&sym->node);
+	if (pnode)
+		entry = &rb_entry(pnode, struct symbol, node)->list;
+	else
+		entry = &sym->sec->symbol_list;
+	list_add(&sym->list, entry);
+	elf_hash_add(elf->symbol_hash, &sym->hash, sym->idx);
+	elf_hash_add(elf->symbol_name_hash, &sym->name_hash, str_hash(sym->name));
+
+	/*
+	 * Don't store empty STT_NOTYPE symbols in the rbtree. They
+	 * can exist within a function, confusing the sorting.
+	 */
+	if (!sym->len)
+		rb_erase(&sym->node, &sym->sec->symbol_tree);
+}
+
 static int read_symbols(struct elf *elf)
 {
 	struct section *symtab, *symtab_shndx, *sec;
 	struct symbol *sym, *pfunc;
-	struct list_head *entry;
-	struct rb_node *pnode;
 	int symbols_nr, i;
 	char *coldstr;
 	Elf_Data *shndx_data = NULL;
@@ -366,9 +367,6 @@ static int read_symbols(struct elf *elf)
 			goto err;
 		}
 
-		sym->type = GELF_ST_TYPE(sym->sym.st_info);
-		sym->bind = GELF_ST_BIND(sym->sym.st_info);
-
 		if ((sym->sym.st_shndx > SHN_UNDEF &&
 		     sym->sym.st_shndx < SHN_LORESERVE) ||
 		    (shndx_data && sym->sym.st_shndx == SHN_XINDEX)) {
@@ -381,32 +379,14 @@ static int read_symbols(struct elf *elf)
 				     sym->name);
 				goto err;
 			}
-			if (sym->type == STT_SECTION) {
+			if (GELF_ST_TYPE(sym->sym.st_info) == STT_SECTION) {
 				sym->name = sym->sec->name;
 				sym->sec->sym = sym;
 			}
 		} else
 			sym->sec = find_section_by_index(elf, 0);
 
-		sym->offset = sym->sym.st_value;
-		sym->len = sym->sym.st_size;
-
-		rb_add(&sym->node, &sym->sec->symbol_tree, symbol_to_offset);
-		pnode = rb_prev(&sym->node);
-		if (pnode)
-			entry = &rb_entry(pnode, struct symbol, node)->list;
-		else
-			entry = &sym->sec->symbol_list;
-		list_add(&sym->list, entry);
-		elf_hash_add(elf->symbol_hash, &sym->hash, sym->idx);
-		elf_hash_add(elf->symbol_name_hash, &sym->name_hash, str_hash(sym->name));
-
-		/*
-		 * Don't store empty STT_NOTYPE symbols in the rbtree. They
-		 * can exist within a function, confusing the sorting.
-		 */
-		if (!sym->len)
-			rb_erase(&sym->node, &sym->sec->symbol_tree);
+		elf_add_symbol(elf, sym);
 	}
 
 	if (stats)
@@ -473,12 +453,73 @@ err:
 	return -1;
 }
 
-void elf_add_reloc(struct elf *elf, struct reloc *reloc)
-{
-	struct section *sec = reloc->sec;
-
-	list_add_tail(&reloc->list, &sec->reloc_list);
+static struct section *elf_create_reloc_section(struct elf *elf,
+						struct section *base,
+						int reltype);
+
+int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
+		  unsigned int type, struct symbol *sym, int addend)
+{
+	struct reloc *reloc;
+
+	if (!sec->reloc && !elf_create_reloc_section(elf, sec, SHT_RELA))
+		return -1;
+
+	reloc = malloc(sizeof(*reloc));
+	if (!reloc) {
+		perror("malloc");
+		return -1;
+	}
+	memset(reloc, 0, sizeof(*reloc));
+
+	reloc->sec = sec->reloc;
+	reloc->offset = offset;
+	reloc->type = type;
+	reloc->sym = sym;
+	reloc->addend = addend;
+
+	list_add_tail(&reloc->list, &sec->reloc->reloc_list);
 	elf_hash_add(elf->reloc_hash, &reloc->hash, reloc_hash(reloc));
+
+	sec->reloc->changed = true;
+
+	return 0;
+}
+
+int elf_add_reloc_to_insn(struct elf *elf, struct section *sec,
+			  unsigned long offset, unsigned int type,
+			  struct section *insn_sec, unsigned long insn_off)
+{
+	struct symbol *sym;
+	int addend;
+
+	if (insn_sec->sym) {
+		sym = insn_sec->sym;
+		addend = insn_off;
+
+	} else {
+		/*
+		 * The Clang assembler strips section symbols, so we have to
+		 * reference the function symbol instead:
+		 */
+		sym = find_symbol_containing(insn_sec, insn_off);
+		if (!sym) {
+			/*
+			 * Hack alert. This happens when we need to reference
+			 * the NOP pad insn immediately after the function.
+			 */
+			sym = find_symbol_containing(insn_sec, insn_off - 1);
+		}
+
+		if (!sym) {
+			WARN("can't find symbol containing %s+0x%lx", insn_sec->name, insn_off);
+			return -1;
+		}
+
+		addend = insn_off - sym->offset;
+	}
+
+	return elf_add_reloc(elf, sec, offset, type, sym, addend);
 }
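For reference, a converted caller now reads like the create_static_call_sections() hunk above: hand the helper the target section/offset pair plus the instruction's section/offset, and it resolves the symbol and addend itself:

	if (elf_add_reloc_to_insn(file->elf, sec,
				  idx * sizeof(struct static_call_site),
				  R_X86_64_PC32,
				  insn->sec, insn->offset))
		return -1;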
 
 static int read_rel_reloc(struct section *sec, int i, struct reloc *reloc, unsigned int *symndx)
@@ -558,7 +599,9 @@ static int read_relocs(struct elf *elf)
 				return -1;
 			}
 
-			elf_add_reloc(elf, reloc);
+			list_add_tail(&reloc->list, &sec->reloc_list);
+			elf_hash_add(elf->reloc_hash, &reloc->hash, reloc_hash(reloc));
+
 			nr_reloc++;
 		}
 		max_reloc = max(max_reloc, nr_reloc);
@@ -636,13 +679,108 @@ err:
 	return NULL;
 }
 
+static int elf_add_string(struct elf *elf, struct section *strtab, char *str)
+{
+	Elf_Data *data;
+	Elf_Scn *s;
+	int len;
+
+	if (!strtab)
+		strtab = find_section_by_name(elf, ".strtab");
+	if (!strtab) {
+		WARN("can't find .strtab section");
+		return -1;
+	}
+
+	s = elf_getscn(elf->elf, strtab->idx);
+	if (!s) {
+		WARN_ELF("elf_getscn");
+		return -1;
+	}
+
+	data = elf_newdata(s);
+	if (!data) {
+		WARN_ELF("elf_newdata");
+		return -1;
+	}
+
+	data->d_buf = str;
+	data->d_size = strlen(str) + 1;
+	data->d_align = 1;
+
+	len = strtab->len;
+	strtab->len += data->d_size;
+	strtab->changed = true;
+
+	return len;
+}
+
+struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
+{
+	struct section *symtab;
+	struct symbol *sym;
+	Elf_Data *data;
+	Elf_Scn *s;
+
+	sym = malloc(sizeof(*sym));
+	if (!sym) {
+		perror("malloc");
+		return NULL;
+	}
+	memset(sym, 0, sizeof(*sym));
+
+	sym->name = strdup(name);
+
+	sym->sym.st_name = elf_add_string(elf, NULL, sym->name);
+	if (sym->sym.st_name == -1)
+		return NULL;
+
+	sym->sym.st_info = GELF_ST_INFO(STB_GLOBAL, STT_NOTYPE);
+	// st_other 0
+	// st_shndx 0
+	// st_value 0
+	// st_size 0
+
+	symtab = find_section_by_name(elf, ".symtab");
+	if (!symtab) {
+		WARN("can't find .symtab");
+		return NULL;
+	}
+
+	s = elf_getscn(elf->elf, symtab->idx);
+	if (!s) {
+		WARN_ELF("elf_getscn");
+		return NULL;
+	}
+
+	data = elf_newdata(s);
+	if (!data) {
+		WARN_ELF("elf_newdata");
+		return NULL;
+	}
+
+	data->d_buf = &sym->sym;
+	data->d_size = sizeof(sym->sym);
+	data->d_align = 1;
+
+	sym->idx = symtab->len / sizeof(sym->sym);
+
+	symtab->len += data->d_size;
+	symtab->changed = true;
+
+	sym->sec = find_section_by_index(elf, 0);
+
+	elf_add_symbol(elf, sym);
+
+	return sym;
+}
+
 struct section *elf_create_section(struct elf *elf, const char *name,
 				   unsigned int sh_flags, size_t entsize, int nr)
 {
 	struct section *sec, *shstrtab;
 	size_t size = entsize * nr;
 	Elf_Scn *s;
-	Elf_Data *data;
 
 	sec = malloc(sizeof(*sec));
 	if (!sec) {
@@ -699,7 +837,6 @@ struct section *elf_create_section(struct elf *elf, const char *name,
 	sec->sh.sh_addralign = 1;
 	sec->sh.sh_flags = SHF_ALLOC | sh_flags;
 
-
 	/* Add section name to .shstrtab (or .strtab for Clang) */
 	shstrtab = find_section_by_name(elf, ".shstrtab");
 	if (!shstrtab)
@@ -708,27 +845,9 @@ struct section *elf_create_section(struct elf *elf, const char *name,
 		WARN("can't find .shstrtab or .strtab section");
 		return NULL;
 	}
-
-	s = elf_getscn(elf->elf, shstrtab->idx);
-	if (!s) {
-		WARN_ELF("elf_getscn");
-		return NULL;
-	}
-
-	data = elf_newdata(s);
-	if (!data) {
-		WARN_ELF("elf_newdata");
-		return NULL;
-	}
-
-	data->d_buf = sec->name;
-	data->d_size = strlen(name) + 1;
-	data->d_align = 1;
-
-	sec->sh.sh_name = shstrtab->len;
-
-	shstrtab->len += strlen(name) + 1;
-	shstrtab->changed = true;
+	sec->sh.sh_name = elf_add_string(elf, shstrtab, sec->name);
+	if (sec->sh.sh_name == -1)
+		return NULL;
 
 	list_add_tail(&sec->list, &elf->sections);
 	elf_hash_add(elf->section_hash, &sec->hash, sec->idx);
@@ -799,7 +918,7 @@ static struct section *elf_create_rela_reloc_section(struct elf *elf, struct sec
 	return sec;
 }
 
-struct section *elf_create_reloc_section(struct elf *elf,
+static struct section *elf_create_reloc_section(struct elf *elf,
 					 struct section *base,
 					 int reltype)
 {
@@ -873,14 +992,11 @@ static int elf_rebuild_rela_reloc_section(struct section *sec, int nr)
 	return 0;
 }
 
-int elf_rebuild_reloc_section(struct elf *elf, struct section *sec)
+static int elf_rebuild_reloc_section(struct elf *elf, struct section *sec)
 {
 	struct reloc *reloc;
 	int nr;
 
-	sec->changed = true;
-	elf->changed = true;
-
 	nr = 0;
 	list_for_each_entry(reloc, &sec->reloc_list, list)
 		nr++;
@@ -944,9 +1060,15 @@ int elf_write(struct elf *elf)
 	struct section *sec;
 	Elf_Scn *s;
 
-	/* Update section headers for changed sections: */
+	/* Update changed relocation sections and section headers: */
 	list_for_each_entry(sec, &elf->sections, list) {
 		if (sec->changed) {
+			if (sec->base &&
+			    elf_rebuild_reloc_section(elf, sec)) {
+				WARN("elf_rebuild_reloc_section");
+				return -1;
+			}
+
 			s = elf_getscn(elf->elf, sec->idx);
 			if (!s) {
 				WARN_ELF("elf_getscn");
@@ -958,6 +1080,7 @@ int elf_write(struct elf *elf)
 			}
 
 			sec->changed = false;
+			elf->changed = true;
 		}
 	}
 
@@ -86,4 +86,8 @@ const char *arch_nop_insn(int len);
 
 int arch_decode_hint_reg(struct instruction *insn, u8 sp_reg);
 
+bool arch_is_retpoline(struct symbol *sym);
+
+int arch_rewrite_retpolines(struct objtool_file *file);
+
 #endif /* _ARCH_H */
@@ -39,7 +39,7 @@ struct alt_group {
 struct instruction {
 	struct list_head list;
 	struct hlist_node hash;
-	struct list_head static_call_node;
+	struct list_head call_node;
 	struct list_head mcount_loc_node;
 	struct section *sec;
 	unsigned long offset;
@@ -56,6 +56,7 @@ struct instruction {
 	struct instruction *jump_dest;
 	struct instruction *first_jump_src;
 	struct reloc *jump_table;
+	struct reloc *reloc;
 	struct list_head alts;
 	struct symbol *func;
 	struct list_head stack_ops;
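
The new `reloc` member caches the relocation attached to an instruction so repeated queries do not have to walk the reloc lookup machinery every time. A sketch of how such a cache is typically consumed, assuming the accessor shape used elsewhere in objtool (the helper name is illustrative):

	static struct reloc *insn_reloc(struct objtool_file *file,
					struct instruction *insn)
	{
		/* First lookup populates the cache; later calls hit it. */
		if (insn->reloc)
			return insn->reloc;

		insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec,
						       insn->offset, insn->len);
		return insn->reloc;
	}
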
@@ -122,12 +122,18 @@ static inline u32 reloc_hash(struct reloc *reloc)
 
 struct elf *elf_open_read(const char *name, int flags);
 struct section *elf_create_section(struct elf *elf, const char *name, unsigned int sh_flags, size_t entsize, int nr);
-struct section *elf_create_reloc_section(struct elf *elf, struct section *base, int reltype);
-void elf_add_reloc(struct elf *elf, struct reloc *reloc);
+
+int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
+		  unsigned int type, struct symbol *sym, int addend);
+int elf_add_reloc_to_insn(struct elf *elf, struct section *sec,
+			  unsigned long offset, unsigned int type,
+			  struct section *insn_sec, unsigned long insn_off);
+
 int elf_write_insn(struct elf *elf, struct section *sec,
 		   unsigned long offset, unsigned int len,
 		   const char *insn);
 int elf_write_reloc(struct elf *elf, struct reloc *reloc);
+struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name);
 int elf_write(struct elf *elf);
 void elf_close(struct elf *elf);
 
@@ -140,9 +146,6 @@ struct reloc *find_reloc_by_dest(const struct elf *elf, struct section *sec, uns
 struct reloc *find_reloc_by_dest_range(const struct elf *elf, struct section *sec,
 				       unsigned long offset, unsigned int len);
 struct symbol *find_func_containing(struct section *sec, unsigned long offset);
-void insn_to_reloc_sym_addend(struct section *sec, unsigned long offset,
-			      struct reloc *reloc);
-int elf_rebuild_reloc_section(struct elf *elf, struct section *sec);
 
 #define for_each_sec(file, sec) \
 	list_for_each_entry(sec, &file->elf->sections, list)
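
Taken together, these header changes invert the reloc workflow: callers no longer create reloc sections or rebuild them by hand; they add relocations through the two new helpers and let elf_write() flush any changed reloc section (see the elf_write() hunk earlier). A minimal sketch under those assumptions; the section name and sizes are made up for illustration:

	static int emit_ip_table(struct elf *elf, struct section *insn_sec,
				 unsigned long insn_off)
	{
		struct section *sec;

		sec = elf_create_section(elf, ".discard.example", 0,
					 sizeof(int), 1);
		if (!sec)
			return -1;

		/* The matching .rela section is created implicitly and is
		 * rebuilt by elf_write() because it is marked changed. */
		return elf_add_reloc_to_insn(elf, sec, 0, R_X86_64_PC32,
					     insn_sec, insn_off);
	}
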
@@ -18,6 +18,7 @@ struct objtool_file {
 	struct elf *elf;
 	struct list_head insn_list;
 	DECLARE_HASHTABLE(insn_hash, 20);
+	struct list_head retpoline_call_list;
 	struct list_head static_call_list;
 	struct list_head mcount_loc_list;
 	bool ignore_unreachables, c_file, hints, rodata;
@@ -61,6 +61,7 @@ struct objtool_file *objtool_open_read(const char *_objname)
 
 	INIT_LIST_HEAD(&file.insn_list);
 	hash_init(file.insn_hash);
+	INIT_LIST_HEAD(&file.retpoline_call_list);
 	INIT_LIST_HEAD(&file.static_call_list);
 	INIT_LIST_HEAD(&file.mcount_loc_list);
 	file.c_file = !vmlinux && find_section_by_name(file.elf, ".comment");
@@ -82,12 +82,11 @@ static int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi)
 }
 
 static int write_orc_entry(struct elf *elf, struct section *orc_sec,
-			   struct section *ip_rsec, unsigned int idx,
+			   struct section *ip_sec, unsigned int idx,
 			   struct section *insn_sec, unsigned long insn_off,
 			   struct orc_entry *o)
 {
 	struct orc_entry *orc;
-	struct reloc *reloc;
 
 	/* populate ORC data */
 	orc = (struct orc_entry *)orc_sec->data->d_buf + idx;
@@ -96,25 +95,9 @@ static int write_orc_entry(struct elf *elf, struct section *orc_sec,
 	orc->bp_offset = bswap_if_needed(orc->bp_offset);
 
 	/* populate reloc for ip */
-	reloc = malloc(sizeof(*reloc));
-	if (!reloc) {
-		perror("malloc");
-		return -1;
-	}
-	memset(reloc, 0, sizeof(*reloc));
-
-	insn_to_reloc_sym_addend(insn_sec, insn_off, reloc);
-	if (!reloc->sym) {
-		WARN("missing symbol for insn at offset 0x%lx",
-		     insn_off);
-		return -1;
-	}
-
-	reloc->type = R_X86_64_PC32;
-	reloc->offset = idx * sizeof(int);
-	reloc->sec = ip_rsec;
-
-	elf_add_reloc(elf, reloc);
-
+	if (elf_add_reloc_to_insn(elf, ip_sec, idx * sizeof(int), R_X86_64_PC32,
+				  insn_sec, insn_off))
+		return -1;
+
 	return 0;
 }
@@ -153,7 +136,7 @@ static unsigned long alt_group_len(struct alt_group *alt_group)
 
 int orc_create(struct objtool_file *file)
 {
-	struct section *sec, *ip_rsec, *orc_sec;
+	struct section *sec, *orc_sec;
 	unsigned int nr = 0, idx = 0;
 	struct orc_list_entry *entry;
 	struct list_head orc_list;
@@ -242,20 +225,14 @@ int orc_create(struct objtool_file *file)
 	sec = elf_create_section(file->elf, ".orc_unwind_ip", 0, sizeof(int), nr);
 	if (!sec)
 		return -1;
-	ip_rsec = elf_create_reloc_section(file->elf, sec, SHT_RELA);
-	if (!ip_rsec)
-		return -1;
 
 	/* Write ORC entries to sections: */
 	list_for_each_entry(entry, &orc_list, list) {
-		if (write_orc_entry(file->elf, orc_sec, ip_rsec, idx++,
+		if (write_orc_entry(file->elf, orc_sec, sec, idx++,
 				    entry->insn_sec, entry->insn_off,
 				    &entry->orc))
 			return -1;
 	}
 
-	if (elf_rebuild_reloc_section(file->elf, ip_rsec))
-		return -1;
-
 	return 0;
 }
@@ -106,6 +106,14 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
 			return -1;
 		}
 
+		/*
+		 * Skip retpoline .altinstr_replacement... we already rewrite the
+		 * instructions for retpolines anyway, see arch_is_retpoline()
+		 * usage in add_{call,jump}_destinations().
+		 */
+		if (arch_is_retpoline(new_reloc->sym))
+			return 1;
+
 		alt->new_sec = new_reloc->sym->sec;
 		alt->new_off = (unsigned int)new_reloc->addend;
 
@@ -154,7 +162,9 @@ int special_get_alts(struct elf *elf, struct list_head *alts)
 		memset(alt, 0, sizeof(*alt));
 
 		ret = get_alt_entry(elf, entry, sec, idx, alt);
-		if (ret)
+		if (ret > 0)
+			continue;
+		if (ret < 0)
 			return ret;
 
 		list_add_tail(&alt->list, alts);
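
With these two hunks, get_alt_entry() grows a third outcome: negative still means hard failure, zero still means "alternative recorded", and a positive return now means "valid but deliberately skipped" (the retpoline replacement case above). A compact restatement of the caller's contract, as a sketch rather than the merged code:

	ret = get_alt_entry(elf, entry, sec, idx, alt);
	if (ret > 0)		/* benign skip, e.g. a retpoline alt */
		continue;
	if (ret < 0)		/* hard error: propagate */
		return ret;
	/* ret == 0: keep the alternative */
	list_add_tail(&alt->list, alts);
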
@@ -10,17 +10,21 @@ FILES="include/linux/objtool.h"
 
 if [ "$SRCARCH" = "x86" ]; then
 FILES="$FILES
+arch/x86/include/asm/nops.h
 arch/x86/include/asm/inat_types.h
 arch/x86/include/asm/orc_types.h
 arch/x86/include/asm/emulate_prefix.h
 arch/x86/lib/x86-opcode-map.txt
 arch/x86/tools/gen-insn-attr-x86.awk
 include/linux/static_call_types.h
-arch/x86/include/asm/inat.h -I '^#include [\"<]\(asm/\)*inat_types.h[\">]'
-arch/x86/include/asm/insn.h -I '^#include [\"<]\(asm/\)*inat.h[\">]'
-arch/x86/lib/inat.c -I '^#include [\"<]\(../include/\)*asm/insn.h[\">]'
-arch/x86/lib/insn.c -I '^#include [\"<]\(../include/\)*asm/in\(at\|sn\).h[\">]' -I '^#include [\"<]\(../include/\)*asm/emulate_prefix.h[\">]'
 "
+
+SYNC_CHECK_FILES='
+arch/x86/include/asm/inat.h
+arch/x86/include/asm/insn.h
+arch/x86/lib/inat.c
+arch/x86/lib/insn.c
+'
 fi
 
 check_2 () {
@@ -63,3 +67,9 @@ while read -r file_entry; do
 done <<EOF
 $FILES
 EOF
+
+if [ "$SRCARCH" = "x86" ]; then
+	for i in $SYNC_CHECK_FILES; do
+		check $i '-I "^.*\/\*.*__ignore_sync_check__.*\*\/.*$"'
+	done
+fi
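
Both sync scripts now whitelist lines that carry an __ignore_sync_check__ marker instead of hard-coding one -I pattern per header. Judging by the regex above, the marker is a trailing C comment on lines that legitimately differ between the kernel and tools copies; a hedged illustration of a line the check would ignore (the include path is only an example):

	#include <asm/inat.h> /* __ignore_sync_check__ */
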
@@ -96,13 +96,12 @@ static int get_branch(const char *branch_str)
 static int test_data_item(struct test_data *dat, int x86_64)
 {
 	struct intel_pt_insn intel_pt_insn;
+	int op, branch, ret;
 	struct insn insn;
-	int op, branch;
 
-	insn_init(&insn, dat->data, MAX_INSN_SIZE, x86_64);
-	insn_get_length(&insn);
-
-	if (!insn_complete(&insn)) {
+	ret = insn_decode(&insn, dat->data, MAX_INSN_SIZE,
+			  x86_64 ? INSN_MODE_64 : INSN_MODE_32);
+	if (ret < 0) {
 		pr_debug("Failed to decode: %s\n", dat->asm_rep);
 		return -1;
 	}
@@ -11,7 +11,7 @@ void arch_fetch_insn(struct perf_sample *sample,
 		     struct machine *machine)
 {
 	struct insn insn;
-	int len;
+	int len, ret;
 	bool is64bit = false;
 
 	if (!sample->ip)
@@ -19,8 +19,9 @@ void arch_fetch_insn(struct perf_sample *sample,
 	len = thread__memcpy(thread, machine, sample->insn, sample->ip, sizeof(sample->insn), &is64bit);
 	if (len <= 0)
 		return;
-	insn_init(&insn, sample->insn, len, is64bit);
-	insn_get_length(&insn);
-	if (insn_complete(&insn) && insn.length <= len)
+
+	ret = insn_decode(&insn, sample->insn, len,
+			  is64bit ? INSN_MODE_64 : INSN_MODE_32);
+	if (ret >= 0 && insn.length <= len)
 		sample->insn_len = insn.length;
 }
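
All of these perf call sites follow the same mechanical conversion to the insn_decode() API introduced by this series: one call replaces the insn_init() + insn_get_length() + insn_complete() sequence, and failure is reported through a negative return instead of a completeness check. A distilled sketch of the pattern (helper and buffer names are illustrative):

	static int decoded_length(const unsigned char *buf, int buf_len,
				  bool is64bit)
	{
		struct insn insn;
		int ret;

		ret = insn_decode(&insn, buf, buf_len,
				  is64bit ? INSN_MODE_64 : INSN_MODE_32);
		if (ret < 0)
			return ret;	/* decode failed */

		return insn.length;
	}
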
@@ -75,6 +75,13 @@ include/uapi/asm-generic/mman-common.h
 include/uapi/asm-generic/unistd.h
 '
 
+SYNC_CHECK_FILES='
+arch/x86/include/asm/inat.h
+arch/x86/include/asm/insn.h
+arch/x86/lib/inat.c
+arch/x86/lib/insn.c
+'
+
 # These copies are under tools/perf/trace/beauty/ as they are not used to in
 # building object files only by scripts in tools/perf/trace/beauty/ to generate
 # tables that then gets included in .c files for things like id->string syscall
@@ -129,6 +136,10 @@ for i in $FILES; do
   check $i -B
 done
 
+for i in $SYNC_CHECK_FILES; do
+  check $i '-I "^.*\/\*.*__ignore_sync_check__.*\*\/.*$"'
+done
+
 # diff with extra ignore lines
 check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memcpy_\(erms\|orig\))"'
 check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memset_\(erms\|orig\))"'
@@ -137,10 +148,6 @@ check include/uapi/linux/mman.h '-I "^#include <\(uapi/\)*asm/mman.h>"'
 check include/linux/build_bug.h '-I "^#\(ifndef\|endif\)\( \/\/\)* static_assert$"'
 check include/linux/ctype.h '-I "isdigit("'
 check lib/ctype.c '-I "^EXPORT_SYMBOL" -I "^#include <linux/export.h>" -B'
-check arch/x86/include/asm/inat.h '-I "^#include [\"<]\(asm/\)*inat_types.h[\">]"'
-check arch/x86/include/asm/insn.h '-I "^#include [\"<]\(asm/\)*inat.h[\">]"'
-check arch/x86/lib/inat.c '-I "^#include [\"<]\(../include/\)*asm/insn.h[\">]"'
-check arch/x86/lib/insn.c '-I "^#include [\"<]\(../include/\)*asm/in\(at\|sn\).h[\">]" -I "^#include [\"<]\(../include/\)*asm/emulate_prefix.h[\">]"'
 
 # diff non-symmetric files
 check_2 tools/perf/arch/x86/entry/syscalls/syscall_64.tbl arch/x86/entry/syscalls/syscall_64.tbl
@@ -169,11 +169,13 @@ int intel_pt_get_insn(const unsigned char *buf, size_t len, int x86_64,
 		      struct intel_pt_insn *intel_pt_insn)
 {
 	struct insn insn;
+	int ret;
 
-	insn_init(&insn, buf, len, x86_64);
-	insn_get_length(&insn);
-	if (!insn_complete(&insn) || insn.length > len)
+	ret = insn_decode(&insn, buf, len,
+			  x86_64 ? INSN_MODE_64 : INSN_MODE_32);
+	if (ret < 0 || insn.length > len)
 		return -1;
+
 	intel_pt_insn_decoder(&insn, intel_pt_insn);
 	if (insn.length < INTEL_PT_INSN_BUF_SZ)
 		memcpy(intel_pt_insn->buf, buf, insn.length);
@@ -194,12 +196,13 @@ const char *dump_insn(struct perf_insn *x, uint64_t ip __maybe_unused,
 		      u8 *inbuf, int inlen, int *lenp)
 {
 	struct insn insn;
-	int n, i;
+	int n, i, ret;
 	int left;
 
-	insn_init(&insn, inbuf, inlen, x->is64bit);
-	insn_get_length(&insn);
-	if (!insn_complete(&insn) || insn.length > inlen)
+	ret = insn_decode(&insn, inbuf, inlen,
+			  x->is64bit ? INSN_MODE_64 : INSN_MODE_32);
+
+	if (ret < 0 || insn.length > inlen)
 		return "<bad>";
 	if (lenp)
 		*lenp = insn.length;