x86/alternatives: Add int3_emulate_call() selftest
Given that the entry_*.S changes for this functionality are somewhat
tricky, make sure the paths are tested every boot, instead of on the
rare occasion when we trip an INT3 while rewriting text.

Requested-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Acked-by: Andy Lutomirski <luto@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 7457c0da02
parent faeedb0679
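For context, the helper this selftest exercises turns a trapped INT3 into a
CALL by doing a virtual PUSH on the interrupted stack and redirecting the
saved IP. A rough sketch of the int3_emulate_*() helpers introduced earlier
in this series (simplified, not the literal source; see
arch/x86/include/asm/text-patching.h for the real definitions):

/*
 * Rough sketch: how int3_emulate_call() turns a trapped INT3 into a CALL.
 * The "virtual PUSH" into the interrupted stack is only safe because the
 * entry_{32,64}.S changes leave a gap between the interrupted context and
 * the INT3 IRET frame.
 */
static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
{
	regs->sp -= sizeof(unsigned long);
	*(unsigned long *)regs->sp = val;
}

static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
{
	regs->ip = ip;
}

static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
{
	/* Return address: the byte after the emulated 5-byte CALL. */
	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
	int3_emulate_jmp(regs, func);
}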
@@ -615,11 +615,83 @@ extern struct paravirt_patch_site __start_parainstructions[],
 	__stop_parainstructions[];
 #endif /* CONFIG_PARAVIRT */
 
+/*
+ * Self-test for the INT3 based CALL emulation code.
+ *
+ * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
+ * properly and that there is a stack gap between the INT3 frame and the
+ * previous context. Without this gap doing a virtual PUSH on the interrupted
+ * stack would corrupt the INT3 IRET frame.
+ *
+ * See entry_{32,64}.S for more details.
+ */
+static void __init int3_magic(unsigned int *ptr)
+{
+	*ptr = 1;
+}
+
+extern __initdata unsigned long int3_selftest_ip; /* defined in asm below */
+
+static int __init
+int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
+{
+	struct die_args *args = data;
+	struct pt_regs *regs = args->regs;
+
+	if (!regs || user_mode(regs))
+		return NOTIFY_DONE;
+
+	if (val != DIE_INT3)
+		return NOTIFY_DONE;
+
+	if (regs->ip - INT3_INSN_SIZE != int3_selftest_ip)
+		return NOTIFY_DONE;
+
+	int3_emulate_call(regs, (unsigned long)&int3_magic);
+	return NOTIFY_STOP;
+}
+
+static void __init int3_selftest(void)
+{
+	static __initdata struct notifier_block int3_exception_nb = {
+		.notifier_call	= int3_exception_notify,
+		.priority	= INT_MAX-1, /* last */
+	};
+	unsigned int val = 0;
+
+	BUG_ON(register_die_notifier(&int3_exception_nb));
+
+	/*
+	 * Basically: int3_magic(&val); but really complicated :-)
+	 *
+	 * Stick the address of the INT3 instruction into int3_selftest_ip,
+	 * then trigger the INT3, padded with NOPs to match a CALL instruction
+	 * length.
+	 */
+	asm volatile ("1: int3; nop; nop; nop; nop\n\t"
+		      ".pushsection .init.data,\"aw\"\n\t"
+		      ".align " __ASM_SEL(4, 8) "\n\t"
+		      ".type int3_selftest_ip, @object\n\t"
+		      ".size int3_selftest_ip, " __ASM_SEL(4, 8) "\n\t"
+		      "int3_selftest_ip:\n\t"
+		      __ASM_SEL(.long, .quad) " 1b\n\t"
+		      ".popsection\n\t"
+		      : : __ASM_SEL_RAW(a, D) (&val) : "memory");
+
+	BUG_ON(val != 1);
+
+	unregister_die_notifier(&int3_exception_nb);
+}
+
 void __init alternative_instructions(void)
 {
-	/* The patching is not fully atomic, so try to avoid local interruptions
-	   that might execute the to be patched code.
-	   Other CPUs are not running. */
+	int3_selftest();
+
+	/*
+	 * The patching is not fully atomic, so try to avoid local
+	 * interruptions that might execute the to be patched code.
+	 * Other CPUs are not running.
+	 */
 	stop_nmi();
 
 	/*
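A note on the inline asm added above: it is a hand-built CALL site.
__ASM_SEL(4, 8) and __ASM_SEL(.long, .quad) pick the 32-bit or 64-bit form,
and the __ASM_SEL_RAW(a, D) input constraint places &val in the
first-argument register ("a"/%eax under the kernel's 32-bit regparm
convention, "D"/%rdi on 64-bit). A hedged sketch of the round trip the test
expects, written as a comment rather than literal trap-handler source:

/*
 * Hedged sketch of the path the selftest exercises (simplified):
 *
 *   int3 (from the asm above)
 *     -> #BP exception -> do_int3()
 *          -> notify_die(DIE_INT3, ...)      die notifier chain
 *               -> int3_exception_notify()   registered by int3_selftest()
 *                    -> int3_emulate_call(regs, (unsigned long)&int3_magic)
 *                         - virtual PUSH of regs->ip - INT3_INSN_SIZE
 *                           + CALL_INSN_SIZE (the byte after the NOP pad)
 *                         - regs->ip = &int3_magic
 *     <- IRET resumes in int3_magic(&val), which stores 1
 *     <- RET lands after the NOP padding
 *
 * Net effect, as the patch's own comment says: int3_magic(&val);
 * hence the BUG_ON(val != 1) check.
 */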
@@ -644,10 +716,11 @@ void __init alternative_instructions(void)
 					    _text, _etext);
 	}
 
-	if (!uniproc_patched || num_possible_cpus() == 1)
+	if (!uniproc_patched || num_possible_cpus() == 1) {
 		free_init_pages("SMP alternatives",
 				(unsigned long)__smp_locks,
 				(unsigned long)__smp_locks_end);
+	}
 #endif
 
 	apply_paravirt(__parainstructions, __parainstructions_end);