diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 29ffaabdf75b..95b66a0c639b 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -56,7 +56,6 @@ struct smp_ops_t {
 	int   (*cpu_bootable)(unsigned int nr);
 };
 
-extern void smp_flush_nmi_ipi(u64 delay_us);
 extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
 extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
 extern void smp_send_debugger_break(void);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 4794d6b4f4d2..b19d832ef386 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -423,7 +423,8 @@ int smp_handle_nmi_ipi(struct pt_regs *regs)
 	fn(regs);
 
 	nmi_ipi_lock();
-	nmi_ipi_busy_count--;
+	if (nmi_ipi_busy_count > 1) /* Can race with caller time-out */
+		nmi_ipi_busy_count--;
 out:
 	nmi_ipi_unlock_end(&flags);
 
@@ -448,29 +449,11 @@ static void do_smp_send_nmi_ipi(int cpu, bool safe)
 	}
 }
 
-void smp_flush_nmi_ipi(u64 delay_us)
-{
-	unsigned long flags;
-
-	nmi_ipi_lock_start(&flags);
-	while (nmi_ipi_busy_count) {
-		nmi_ipi_unlock_end(&flags);
-		udelay(1);
-		if (delay_us) {
-			delay_us--;
-			if (!delay_us)
-				return;
-		}
-		nmi_ipi_lock_start(&flags);
-	}
-	nmi_ipi_unlock_end(&flags);
-}
-
 /*
  * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
  * - fn is the target callback function.
  * - delay_us > 0 is the delay before giving up waiting for targets to
- *   enter the handler, == 0 specifies indefinite delay.
+ *   complete executing the handler, == 0 specifies indefinite delay.
  */
 int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool safe)
 {
@@ -507,8 +490,23 @@ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool
 
 	do_smp_send_nmi_ipi(cpu, safe);
 
+	nmi_ipi_lock();
+	/* nmi_ipi_busy_count is held here, so unlock/lock is okay */
 	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
+		nmi_ipi_unlock();
 		udelay(1);
+		nmi_ipi_lock();
+		if (delay_us) {
+			delay_us--;
+			if (!delay_us)
+				break;
+		}
+	}
+
+	while (nmi_ipi_busy_count > 1) {
+		nmi_ipi_unlock();
+		udelay(1);
+		nmi_ipi_lock();
 		if (delay_us) {
 			delay_us--;
 			if (!delay_us)
@@ -516,12 +514,17 @@ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool
 		}
 	}
 
-	nmi_ipi_lock();
 	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
-		/* Could not gather all CPUs */
+		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
 		ret = 0;
 		cpumask_clear(&nmi_ipi_pending_mask);
 	}
+	if (nmi_ipi_busy_count > 1) {
+		/* Timeout waiting for CPUs to execute fn */
+		ret = 0;
+		nmi_ipi_busy_count = 1;
+	}
+
 	nmi_ipi_busy_count--;
 	nmi_ipi_unlock_end(&flags);
 
@@ -597,7 +600,8 @@ static void nmi_stop_this_cpu(struct pt_regs *regs)
 	 * IRQs are already hard disabled by the smp_handle_nmi_ipi.
 	 */
 	nmi_ipi_lock();
-	nmi_ipi_busy_count--;
+	if (nmi_ipi_busy_count > 1)
+		nmi_ipi_busy_count--;
 	nmi_ipi_unlock();
 
 	spin_begin();
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index 1d82274f7e9f..3c6ab22a0c4e 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -174,7 +174,6 @@ static void watchdog_smp_panic(int cpu, u64 tb)
 			continue;
 		smp_send_nmi_ipi(c, wd_lockup_ipi, 1000000);
 	}
-	smp_flush_nmi_ipi(1000000);
 }
 
 /* Take the stuck CPUs out of the watch group */
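
To make the handshake this patch establishes concrete: the caller claims the IPI machinery by taking the busy count to 1, each target increments the count on entering the handler and decrements it after running fn(), and the caller now waits in two phases (first for every target to enter the handler, then for every handler to finish) before dropping its own reference. Below is a standalone userspace sketch of that protocol, under loose assumptions and purely for illustration, not kernel code: a pthread mutex stands in for nmi_ipi_lock()/nmi_ipi_unlock(), a plain bitmask for nmi_ipi_pending_mask, usleep() for udelay(), and the names send_ipi_all, target and NR_TARGETS are invented for the example.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define NR_TARGETS 4

static pthread_mutex_t ipi_lock = PTHREAD_MUTEX_INITIALIZER;
static int busy_count;             /* caller holds 1, +1 per handler in flight */
static unsigned long pending_mask; /* one bit per target yet to enter handler */

/* The callback every target is asked to run (stand-in for fn in the patch). */
static void fn(int cpu)
{
	printf("cpu %d: running callback\n", cpu);
}

/* Target side: models smp_handle_nmi_ipi(). */
static void *target(void *arg)
{
	int cpu = (int)(long)arg;

	pthread_mutex_lock(&ipi_lock);
	if (!(pending_mask & (1UL << cpu))) {
		/* Caller timed out and cleared the mask: bail out. */
		pthread_mutex_unlock(&ipi_lock);
		return NULL;
	}
	pending_mask &= ~(1UL << cpu); /* tell the caller we entered */
	busy_count++;                  /* tell the caller fn() is running */
	pthread_mutex_unlock(&ipi_lock);

	fn(cpu);

	pthread_mutex_lock(&ipi_lock);
	if (busy_count > 1)            /* can race with caller time-out */
		busy_count--;
	pthread_mutex_unlock(&ipi_lock);
	return NULL;
}

/* Caller side: models the two wait loops in __smp_send_nmi_ipi(). */
static bool send_ipi_all(unsigned long delay_us)
{
	pthread_t th[NR_TARGETS];
	bool ok = true;

	pthread_mutex_lock(&ipi_lock);
	busy_count = 1;                         /* claim the machinery */
	pending_mask = (1UL << NR_TARGETS) - 1; /* all targets pending */

	for (long i = 0; i < NR_TARGETS; i++)
		pthread_create(&th[i], NULL, target, (void *)i);

	/* Phase 1: wait for every target to enter the handler. */
	while (pending_mask) {
		pthread_mutex_unlock(&ipi_lock);
		usleep(1);
		pthread_mutex_lock(&ipi_lock);
		if (delay_us && !--delay_us)
			break;
	}

	/* Phase 2: wait for every handler to finish executing fn(). */
	while (busy_count > 1) {
		pthread_mutex_unlock(&ipi_lock);
		usleep(1);
		pthread_mutex_lock(&ipi_lock);
		if (delay_us && !--delay_us)
			break;
	}

	if (pending_mask) {   /* timed out before all targets entered */
		ok = false;
		pending_mask = 0;
	}
	if (busy_count > 1) { /* timed out before all handlers finished */
		ok = false;
		busy_count = 1;
	}
	busy_count--;         /* drop the caller's own reference */
	pthread_mutex_unlock(&ipi_lock);

	for (int i = 0; i < NR_TARGETS; i++)
		pthread_join(th[i], NULL);
	return ok;
}

int main(void)
{
	printf("gathered and completed: %s\n",
	       send_ipi_all(1000000) ? "yes" : "timed out");
	return 0;
}

The "> 1" guard on the handler-side decrement is the heart of the fix: if the caller times out and forces the count back to 1 before dropping its reference, a late handler sees the count at or below 1 and skips its decrement, so the count cannot underflow and wedge the next caller. This also makes the separate smp_flush_nmi_ipi() step unnecessary, which is why the watchdog call site simply drops it.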