// SPDX-License-Identifier: GPL-2.0
#include <linux/static_call.h>
#include <linux/memory.h>
#include <linux/bug.h>
#include <asm/text-patching.h>

enum insn_type {
	CALL = 0, /* site call */
	NOP = 1, /* site cond-call */
	JMP = 2, /* tramp / site tail-call */
	RET = 3, /* tramp / site cond-tail-call */
	JCC = 4, /* site cond-tail-call, re-encoded Jcc.d32 */
};

/*
 * ud1 %esp, %ecx - a 3 byte #UD that is unique to trampolines, chosen such
 * that there is no false-positive trampoline identification while also being a
 * speculation stop.
 */
static const u8 tramp_ud[] = { 0x0f, 0xb9, 0xcc };
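/*
 * Illustrative trampoline layout (per the patch that introduced this
 * signature): 8 bytes total, the first 5 being the jmp.d32/ret sequence
 * and the final 3 the signature above, e.g.:
 *
 *	e9 xx xx xx xx		jmp.d32 <func>
 *	0f b9 cc		ud1 %esp, %ecx
 */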

/*
 * cs cs cs xorl %eax, %eax - a single 5 byte instruction that clears %[er]ax
 */
static const u8 xor5rax[] = { 0x2e, 0x2e, 0x2e, 0x31, 0xc0 };
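/*
 * Same size as the 5-byte call it replaces (see the CALL case below, where
 * a call to __static_call_return0() is patched into this): the three %cs
 * prefixes pad the 2-byte xorl out to CALL_INSN_SIZE.
 */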

static const u8 retinsn[] = { RET_INSN_OPCODE, 0xcc, 0xcc, 0xcc, 0xcc };
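/*
 * A plain ret, padded with int3 to CALL_INSN_SIZE so the patch covers the
 * whole 5-byte site; the int3 bytes also act as a speculation stop behind
 * the ret.
 */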

static u8 __is_Jcc(u8 *insn) /* Jcc.d32 */
{
	u8 ret = 0;

	if (insn[0] == 0x0f) {
		u8 tmp = insn[1];
		if ((tmp & 0xf0) == 0x80)
			ret = tmp;
	}

	return ret;
}
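/*
 * Example: the bytes 0x0f 0x84 <rel32> are je.d32, so this returns 0x84
 * (the second opcode byte; 0x80-0x8f encode the condition). It returns 0
 * for anything that is not a Jcc.d32.
 */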

extern void __static_call_return(void);

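/*
 * A bare "ret; int3" used as the patch target for a NULL func at a Jcc
 * site when X86_FEATURE_RETHUNK is off (see the JCC case below).
 */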
asm (".global __static_call_return\n\t"
|
|
|
|
".type __static_call_return, @function\n\t"
|
|
|
|
ASM_FUNC_ALIGN "\n\t"
|
|
|
|
"__static_call_return:\n\t"
|
|
|
|
ANNOTATE_NOENDBR
|
|
|
|
ANNOTATE_RETPOLINE_SAFE
|
|
|
|
"ret; int3\n\t"
|
|
|
|
".size __static_call_return, . - __static_call_return \n\t");
|
|
|
|
|
static void __ref __static_call_transform(void *insn, enum insn_type type,
					  void *func, bool modinit)
{
	const void *emulate = NULL;
	int size = CALL_INSN_SIZE;
	const void *code;
	u8 op, buf[6];

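	/*
	 * A JMP or RET transform aimed at a site that currently holds a
	 * Jcc.d32 must stay conditional; re-encode it as JCC instead.
	 */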
	if ((type == JMP || type == RET) && (op = __is_Jcc(insn)))
		type = JCC;

	switch (type) {
	case CALL:
		func = callthunks_translate_call_dest(func);
		code = text_gen_insn(CALL_INSN_OPCODE, insn, func);
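		/*
		 * Calls to __static_call_return0() become xor5rax instead;
		 * keep the generated call so text_poke_bp() can emulate it
		 * while the site is being rewritten.
		 */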
		if (func == &__static_call_return0) {
			emulate = code;
			code = &xor5rax;
		}

		break;

	case NOP:
		code = x86_nops[5];
		break;

	case JMP:
		code = text_gen_insn(JMP32_INSN_OPCODE, insn, func);
		break;

	case RET:
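		/*
		 * With rethunks, "ret" is spelled as a direct jmp to the
		 * return thunk; without them, emit a plain ret.
		 */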
		if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
			code = text_gen_insn(JMP32_INSN_OPCODE, insn, x86_return_thunk);
		else
			code = &retinsn;
		break;

	case JCC:
		if (!func) {
			func = __static_call_return;
			if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
				func = x86_return_thunk;
		}

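		/*
		 * Jcc.d32 is 6 bytes: the 0x0f escape, the condition byte
		 * found by __is_Jcc(), and a rel32; re-emit the condition
		 * with a displacement aimed at the new target.
		 */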
		buf[0] = 0x0f;
		__text_gen_insn(buf+1, op, insn+1, func, 5);
		code = buf;
		size = 6;

		break;
	}

	if (memcmp(insn, code, size) == 0)
		return;

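	/*
	 * While booting, or while a module is still initializing, nothing
	 * else can execute this text yet, so it can be written directly;
	 * otherwise go through int3-breakpoint based patching.
	 */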
	if (system_state == SYSTEM_BOOTING || modinit)
		return text_poke_early(insn, code, size);

	text_poke_bp(insn, code, size, emulate);
}

static void __static_call_validate(u8 *insn, bool tail, bool tramp)
{
	u8 opcode = insn[0];

	if (tramp && memcmp(insn+5, tramp_ud, 3)) {
		pr_err("trampoline signature fail");
		BUG();
	}

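	/*
	 * A tail-call site may hold a jmp.d32, a ret or a Jcc.d32; a
	 * regular call site may hold a call, a 5-byte NOP or xor5rax.
	 */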
	if (tail) {
		if (opcode == JMP32_INSN_OPCODE ||
		    opcode == RET_INSN_OPCODE ||
		    __is_Jcc(insn))
			return;
	} else {
		if (opcode == CALL_INSN_OPCODE ||
		    !memcmp(insn, x86_nops[5], 5) ||
		    !memcmp(insn, xor5rax, 5))
			return;
	}

	/*
	 * If we ever trigger this, our text is corrupt; we'll probably not live long.
	 */
	pr_err("unexpected static_call insn opcode 0x%x at %pS\n", opcode, insn);
	BUG();
}

static inline enum insn_type __sc_insn(bool null, bool tail)
{
	/*
	 * Encode the following table without branches:
	 *
	 *   tail   null   insn
	 *   -----+-------+------
	 *     0  |   0   |  CALL
	 *     0  |   1   |  NOP
	 *     1  |   0   |  JMP
	 *     1  |   1   |  RET
	 */
	return 2*tail + null;
}
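
/*
 * This relies on the insn_type values above: e.g. __sc_insn(!func, true)
 * yields RET (3) when func is NULL and JMP (2) otherwise, as used by
 * arch_static_call_transform() below.
 */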

void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
{
	mutex_lock(&text_mutex);

	if (tramp) {
		__static_call_validate(tramp, true, true);
		__static_call_transform(tramp, __sc_insn(!func, true), func, false);
	}

	if (IS_ENABLED(CONFIG_HAVE_STATIC_CALL_INLINE) && site) {
		__static_call_validate(site, tail, false);
		__static_call_transform(site, __sc_insn(!func, tail), func, false);
	}

	mutex_unlock(&text_mutex);
}
EXPORT_SYMBOL_GPL(arch_static_call_transform);

#ifdef CONFIG_RETHUNK
/*
 * This is called by apply_returns() to fix up static call trampolines,
 * specifically ARCH_DEFINE_STATIC_CALL_NULL_TRAMP which is recorded as
 * having a return trampoline.
 *
 * The problem is that static_call() is available before determining
 * X86_FEATURE_RETHUNK and, by implication, running alternatives.
 *
 * This means that __static_call_transform() above can have overwritten the
 * return trampoline and we now need to fix things up to be consistent.
 */
bool __static_call_fixup(void *tramp, u8 op, void *dest)
{
	unsigned long addr = (unsigned long)tramp;
	/*
	 * Not all .return_sites are a static_call trampoline (most are not).
	 * Check if the 3 bytes after the return are still kernel text; if not,
	 * then this definitely is not a trampoline and we need not worry
	 * further.
	 *
	 * This avoids the memcmp() below tripping over pagefaults etc..
	 */
	if (((addr >> PAGE_SHIFT) != ((addr + 7) >> PAGE_SHIFT)) &&
	    !kernel_text_address(addr + 7))
		return false;

	if (memcmp(tramp+5, tramp_ud, 3)) {
		/* Not a trampoline site, not our problem. */
		return false;
	}

	mutex_lock(&text_mutex);
	if (op == RET_INSN_OPCODE || dest == &__x86_return_thunk)
		__static_call_transform(tramp, RET, NULL, true);
	mutex_unlock(&text_mutex);

	return true;
}
#endif