From ac8be955049dab828a68b9c68a75144832f8289f Mon Sep 17 00:00:00 2001
From: Ralf Baechle <ralf@linux-mips.org>
Date: Sat, 20 Jan 2007 00:18:01 +0000
Subject: [PATCH] [MIPS] SMTC: Instant IPI replay.

SMTC pseudo-interrupts between TCs are deferred and queued if the target
TC is interrupt-inhibited (IXMT). In the first SMTC prototypes, these
queued IPIs were serviced on return to user mode, or on entry into the
kernel idle loop. The INSTANT_REPLAY option dispatches them as part of
local_irq_restore() processing, which adds runtime overhead (hence the
option to turn it off), but ensures that IPIs are handled promptly even
under heavy I/O interrupt load.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
 arch/mips/Kconfig           | 14 ++++++++++
 arch/mips/kernel/smtc.c     | 56 ++++++++++++++++++++++---------------
 include/asm-mips/irqflags.h | 22 +++++++++++++++
 3 files changed, 70 insertions(+), 22 deletions(-)

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index fd2ff0698a85..bbd386f572d9 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1568,6 +1568,20 @@ config MIPS_MT_FPAFF
 	depends on MIPS_MT
 	default y
 
+config MIPS_MT_SMTC_INSTANT_REPLAY
+	bool "Low-latency Dispatch of Deferred SMTC IPIs"
+	depends on MIPS_MT_SMTC
+	default y
+	help
+	  SMTC pseudo-interrupts between TCs are deferred and queued
+	  if the target TC is interrupt-inhibited (IXMT). In the first
+	  SMTC prototypes, these queued IPIs were serviced on return
+	  to user mode, or on entry into the kernel idle loop. The
+	  INSTANT_REPLAY option dispatches them as part of local_irq_restore()
+	  processing, which adds runtime overhead (hence the option to turn
+	  it off), but ensures that IPIs are handled promptly even under
+	  heavy I/O interrupt load.
+
 config MIPS_VPE_LOADER_TOM
 	bool "Load VPE program into memory hidden from linux"
 	depends on MIPS_VPE_LOADER
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index a8b387197d5b..44238ab2fc99 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1017,6 +1017,33 @@ void setup_cross_vpe_interrupts(void)
  * SMTC-specific hacks invoked from elsewhere in the kernel.
  */
 
+void smtc_ipi_replay(void)
+{
+	/*
+	 * To the extent that we've ever turned interrupts off,
+	 * we may have accumulated deferred IPIs. This is subtle.
+	 * If we use the smtc_ipi_qdepth() macro, we'll get an
+	 * exact number - but we'll also disable interrupts
+	 * and create a window of failure where a new IPI gets
+	 * queued after we test the depth but before we re-enable
+	 * interrupts. So long as IXMT never gets set, however,
+	 * we should be OK: If we pick up something and dispatch
+	 * it here, that's great. If we see nothing, but concurrent
+	 * with this operation, another TC sends us an IPI, IXMT
+	 * is clear, and we'll handle it as a real pseudo-interrupt
+	 * and not a pseudo-pseudo interrupt.
+	 */
+	if (IPIQ[smp_processor_id()].depth > 0) {
+		struct smtc_ipi *pipi;
+		extern void self_ipi(struct smtc_ipi *);
+
+		while ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()]))) {
+			self_ipi(pipi);
+			smtc_cpu_stats[smp_processor_id()].selfipis++;
+		}
+	}
+}
+
 void smtc_idle_loop_hook(void)
 {
 #ifdef SMTC_IDLE_HOOK_DEBUG
@@ -1113,29 +1140,14 @@ void smtc_idle_loop_hook(void)
 	if (pdb_msg != &id_ho_db_msg[0])
 		printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
 #endif /* SMTC_IDLE_HOOK_DEBUG */
 
-	/*
-	 * To the extent that we've ever turned interrupts off,
-	 * we may have accumulated deferred IPIs. This is subtle.
-	 * If we use the smtc_ipi_qdepth() macro, we'll get an
-	 * exact number - but we'll also disable interrupts
-	 * and create a window of failure where a new IPI gets
-	 * queued after we test the depth but before we re-enable
-	 * interrupts. So long as IXMT never gets set, however,
-	 * we should be OK: If we pick up something and dispatch
-	 * it here, that's great. If we see nothing, but concurrent
-	 * with this operation, another TC sends us an IPI, IXMT
-	 * is clear, and we'll handle it as a real pseudo-interrupt
-	 * and not a pseudo-pseudo interrupt.
-	 */
-	if (IPIQ[smp_processor_id()].depth > 0) {
-		struct smtc_ipi *pipi;
-		extern void self_ipi(struct smtc_ipi *);
-		if ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()])) != NULL) {
-			self_ipi(pipi);
-			smtc_cpu_stats[smp_processor_id()].selfipis++;
-		}
-	}
+	/*
+	 * Replay any accumulated deferred IPIs. If "Instant Replay"
+	 * is in use, there should never be any.
+	 */
+#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
+	smtc_ipi_replay();
+#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
 }
 
 void smtc_soft_dump(void)
diff --git a/include/asm-mips/irqflags.h b/include/asm-mips/irqflags.h
index 46bf5de5ac72..af3b07dfad4b 100644
--- a/include/asm-mips/irqflags.h
+++ b/include/asm-mips/irqflags.h
@@ -15,6 +15,27 @@
 
 #include <asm/hazards.h>
 
+/*
+ * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred IPIs,
+ * at the cost of branch and call overhead on each local_irq_restore()
+ */
+
+#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
+
+extern void smtc_ipi_replay(void);
+
+#define irq_restore_epilog(flags)				\
+do {								\
+	if (!(flags & 0x0400))					\
+		smtc_ipi_replay();				\
+} while (0)
+
+#else
+
+#define irq_restore_epilog(ignore) do { } while (0)
+
+#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
+
 __asm__ (
 	"	.macro	raw_local_irq_enable			\n"
 	"	.set	push					\n"
@@ -193,6 +214,7 @@ do {									\
 		: "=r" (__tmp1)					\
 		: "0" (flags)					\
 		: "memory");					\
+	irq_restore_epilog(flags);				\
 } while(0)
 
 static inline int raw_irqs_disabled_flags(unsigned long flags)
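
Illustration (not part of the patch): the stand-alone C sketch below models
the queue-and-drain idea behind "instant replay" under simplified
assumptions. The names deferred_ipi, ipi_queue, queue_ipi(), replay_ipis(),
my_local_irq_restore() and IXMT_BIT are hypothetical stand-ins for the
kernel's IPIQ[], smtc_ipi_dq(), self_ipi(), raw_local_irq_restore() and the
0x0400 interrupt-inhibit test in irq_restore_epilog(); it shows only the
shape of the pattern, not the real SMTC code. In the kernel, the epilog runs
after the hardware flags have been restored, so an IPI that arrives
concurrently is taken as a normal pseudo-interrupt rather than queued, which
is why the unsynchronized depth check in smtc_ipi_replay() is safe.

/* illustrative_instant_replay.c -- simplified user-space model, not kernel code */
#include <stdio.h>
#include <stdlib.h>

#define IXMT_BIT 0x0400			/* stand-in for the 0x0400 test above */

struct deferred_ipi {
	int type;			/* which pseudo-interrupt was deferred */
	struct deferred_ipi *next;
};

/* Stand-in for IPIQ[cpu] (a LIFO list for brevity; the real queue is FIFO). */
static struct deferred_ipi *ipi_queue;

/* Queue an IPI because the "target" is currently interrupt-inhibited. */
static void queue_ipi(int type)
{
	struct deferred_ipi *p = malloc(sizeof(*p));

	if (p == NULL)
		exit(1);
	p->type = type;
	p->next = ipi_queue;
	ipi_queue = p;
}

/* Drain and dispatch everything queued, like smtc_ipi_replay() does. */
static void replay_ipis(void)
{
	struct deferred_ipi *p;

	while ((p = ipi_queue) != NULL) {
		ipi_queue = p->next;
		printf("dispatching deferred IPI type %d\n", p->type);
		free(p);
	}
}

/* Model of raw_local_irq_restore() plus the new irq_restore_epilog(). */
static void my_local_irq_restore(unsigned long flags)
{
	/* ...the real macro restores the hardware interrupt state here... */
	if (!(flags & IXMT_BIT))	/* re-enabling interrupts: replay now */
		replay_ipis();
}

int main(void)
{
	queue_ipi(1);			/* IPIs that arrived while inhibited */
	queue_ipi(2);
	my_local_irq_restore(0);	/* prompt replay, as INSTANT_REPLAY does */
	return 0;
}

Building this with any C compiler and running it prints the two deferred IPIs
being dispatched from the restore path, mirroring what the epilog hook does on
each local_irq_restore() when the option is enabled.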