kprobes, x86: Disable irqs during optimized callback
Disable irqs during the optimized callback, so we don't miss any in-irq kprobes.

The following commands:

 # cd /debug/tracing/
 # echo "p mutex_unlock" >> kprobe_events
 # echo "p _raw_spin_lock" >> kprobe_events
 # echo "p smp_apic_timer_interrupt" >> ./kprobe_events
 # echo 1 > events/enable

cause the optimized kprobes to be missed. None are missed with the fix applied.

Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Acked-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Link: http://lkml.kernel.org/r/20110511110613.GB2390@jolsa.brq.redhat.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 9bbeacf52f
parent 693d92a1bb
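The race being closed is easy to see in miniature. Below is a minimal userspace analogue in plain POSIX C, not kernel code, with all names illustrative: sigprocmask() stands in for local_irq_save()/local_irq_restore(), and a blocked SIGALRM stands in for a hardware interrupt carrying an in-irq kprobe. While the event is masked it stays pending and fires after the "callback" finishes, instead of racing with it and being counted as missed.

/* Build: cc -std=c99 -o irq_analogy irq_analogy.c
 * Userspace analogue only; nothing here is a kernel API. */
#define _POSIX_C_SOURCE 199309L
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

static volatile sig_atomic_t hits;

static void on_alarm(int sig)
{
	(void)sig;
	hits++;			/* the deferred event still fires */
}

int main(void)
{
	struct sigaction sa;
	sigset_t block, old;
	struct timespec two_sec = { .tv_sec = 2 };

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_alarm;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGALRM, &sa, NULL);

	sigemptyset(&block);
	sigaddset(&block, SIGALRM);

	sigprocmask(SIG_BLOCK, &block, &old);	/* ~ local_irq_save()      */
	alarm(1);				/* event arrives while masked */
	nanosleep(&two_sec, NULL);		/* the "callback" body     */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* ~ local_irq_restore()   */

	printf("hits after unblocking: %d\n", (int)hits);	/* prints 1 */
	return 0;
}

The point of the analogue: masking the asynchronous event defers it rather than dropping it, which is exactly what disabling irqs buys the optimized callback below.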
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -1183,12 +1183,13 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op,
 					 struct pt_regs *regs)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	unsigned long flags;
 
 	/* This is possible if op is under delayed unoptimizing */
 	if (kprobe_disabled(&op->kp))
 		return;
 
-	preempt_disable();
+	local_irq_save(flags);
 	if (kprobe_running()) {
 		kprobes_inc_nmissed_count(&op->kp);
 	} else {
@@ -1207,7 +1208,7 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op,
 		opt_pre_handler(&op->kp, regs);
 		__this_cpu_write(current_kprobe, NULL);
 	}
-	preempt_enable_no_resched();
+	local_irq_restore(flags);
 }
 
 static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
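For reference, this is how the callback reads with the patch applied, stitched together from the two hunks above; the source between the hunks is not part of the diff and is elided here as well. Note that running with irqs disabled also means the code cannot be preempted on this CPU, which is why the preempt_disable()/preempt_enable_no_resched() pair is replaced outright rather than kept alongside the irq save/restore.

static void __kprobes optimized_callback(struct optimized_kprobe *op,
					 struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long flags;

	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	/* Irqs off: an in-irq kprobe can no longer interrupt us here,
	 * see kprobe_running() as true, and get counted as missed. */
	local_irq_save(flags);
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		/* ... context between the two hunks elided ... */
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}
	local_irq_restore(flags);
}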