[MIPS] SMTC: Instant IPI replay.
SMTC pseudo-interrupts between TCs are deferred and queued if the
target TC is interrupt-inhibited (IXMT). In the first SMTC prototypes,
these queued IPIs were serviced on return to user mode, or on entry
into the kernel idle loop. The INSTANT_REPLAY option dispatches them
as part of local_irq_restore() processing, which adds runtime overhead
(hence the option to turn it off), but ensures that IPIs are handled
promptly even under heavy I/O interrupt load.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 9ee79a3d37
commit ac8be95504
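As background for the diff below, here is a minimal, self-contained C
sketch of the pattern the patch implements: IPIs sent while the target
is inhibited are queued, and restoring interrupts drains the queue on
the spot, which is the "instant replay". This is a userspace toy under
stated assumptions, not kernel code: ipi_msg, send_ipi, irq_restore
and the inhibited flag are illustrative stand-ins for the SMTC IPI
queue, the kernel's IPI send path, local_irq_restore() and the IXMT
bit, and the toy queue is LIFO where the kernel's is FIFO.

#include <stdio.h>

struct ipi_msg {
	int payload;
	struct ipi_msg *next;
};

static struct ipi_msg *ipi_q;	/* deferred-IPI queue (one "TC" shown) */
static int inhibited;		/* stands in for the IXMT bit */

static void handle_ipi(struct ipi_msg *m)
{
	printf("IPI %d handled\n", m->payload);
}

static void send_ipi(struct ipi_msg *m)
{
	if (inhibited) {		/* target can't take it now: defer */
		m->next = ipi_q;
		ipi_q = m;
	} else {
		handle_ipi(m);		/* deliver immediately */
	}
}

static void irq_restore(void)
{
	inhibited = 0;
	while (ipi_q) {			/* "instant replay" on restore */
		struct ipi_msg *m = ipi_q;
		ipi_q = m->next;
		handle_ipi(m);
	}
}

int main(void)
{
	struct ipi_msg a = { 1, NULL }, b = { 2, NULL };

	inhibited = 1;			/* local_irq_save() analogue */
	send_ipi(&a);			/* deferred */
	send_ipi(&b);			/* deferred */
	irq_restore();			/* both IPIs replayed here */
	return 0;
}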
arch/mips/Kconfig
@@ -1568,6 +1568,20 @@ config MIPS_MT_FPAFF
 	depends on MIPS_MT
 	default y
 
+config MIPS_MT_SMTC_INSTANT_REPLAY
+	bool "Low-latency Dispatch of Deferred SMTC IPIs"
+	depends on MIPS_MT_SMTC
+	default y
+	help
+	  SMTC pseudo-interrupts between TCs are deferred and queued
+	  if the target TC is interrupt-inhibited (IXMT). In the first
+	  SMTC prototypes, these queued IPIs were serviced on return
+	  to user mode, or on entry into the kernel idle loop. The
+	  INSTANT_REPLAY option dispatches them as part of local_irq_restore()
+	  processing, which adds runtime overhead (hence the option to turn
+	  it off), but ensures that IPIs are handled promptly even under
+	  heavy I/O interrupt load.
+
 config MIPS_VPE_LOADER_TOM
 	bool "Load VPE program into memory hidden from linux"
 	depends on MIPS_VPE_LOADER
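Usage note: with the default left in place, an SMTC kernel
configuration would simply carry the new symbol next to the existing
one; a hypothetical .config fragment:

CONFIG_MIPS_MT_SMTC=y
CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY=y

Setting the new symbol to n removes the branch-and-call overhead from
local_irq_restore() at the cost of IPI latency, per the help text above.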
arch/mips/kernel/smtc.c
@@ -1017,6 +1017,33 @@ void setup_cross_vpe_interrupts(void)
  * SMTC-specific hacks invoked from elsewhere in the kernel.
  */
 
+void smtc_ipi_replay(void)
+{
+	/*
+	 * To the extent that we've ever turned interrupts off,
+	 * we may have accumulated deferred IPIs. This is subtle.
+	 * If we use the smtc_ipi_qdepth() macro, we'll get an
+	 * exact number - but we'll also disable interrupts
+	 * and create a window of failure where a new IPI gets
+	 * queued after we test the depth but before we re-enable
+	 * interrupts. So long as IXMT never gets set, however,
+	 * we should be OK: If we pick up something and dispatch
+	 * it here, that's great. If we see nothing, but concurrent
+	 * with this operation, another TC sends us an IPI, IXMT
+	 * is clear, and we'll handle it as a real pseudo-interrupt
+	 * and not a pseudo-pseudo interrupt.
+	 */
+	if (IPIQ[smp_processor_id()].depth > 0) {
+		struct smtc_ipi *pipi;
+		extern void self_ipi(struct smtc_ipi *);
+
+		while ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()]))) {
+			self_ipi(pipi);
+			smtc_cpu_stats[smp_processor_id()].selfipis++;
+		}
+	}
+}
+
 void smtc_idle_loop_hook(void)
 {
 #ifdef SMTC_IDLE_HOOK_DEBUG
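The comment above captures the key design choice: smtc_ipi_replay()
peeks at the queue depth without disabling interrupts, accepting a
benign race instead of opening an IXMT window. A minimal standalone C
sketch of that check-then-drain pattern (the struct names and dq() are
illustrative stand-ins, with the real queue locking elided):

#include <stdio.h>

struct msg {
	int id;
	struct msg *next;
};

struct ipiq {
	struct msg *head;
	int depth;		/* peeked without locking in replay() */
};

/* stand-in for smtc_ipi_dq(); the real one takes the queue lock */
static struct msg *dq(struct ipiq *q)
{
	struct msg *m = q->head;

	if (m != NULL) {
		q->head = m->next;
		q->depth--;
	}
	return m;
}

static void replay(struct ipiq *q)
{
	struct msg *m;

	/*
	 * Racy peek, mirroring smtc_ipi_replay(): an IPI queued right
	 * after this test is not lost, because with IXMT clear the
	 * sender's IPI arrives as a real pseudo-interrupt instead of
	 * waiting for the next replay pass.
	 */
	if (q->depth > 0)
		while ((m = dq(q)) != NULL)
			printf("replayed deferred IPI %d\n", m->id);
}

int main(void)
{
	struct msg b = { 2, NULL };
	struct msg a = { 1, &b };
	struct ipiq q = { &a, 2 };

	replay(&q);	/* dispatches both queued messages */
	return 0;
}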
@@ -1113,29 +1140,14 @@ void smtc_idle_loop_hook(void)
 	if (pdb_msg != &id_ho_db_msg[0])
 		printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
 #endif /* SMTC_IDLE_HOOK_DEBUG */
-	/*
-	 * To the extent that we've ever turned interrupts off,
-	 * we may have accumulated deferred IPIs. This is subtle.
-	 * If we use the smtc_ipi_qdepth() macro, we'll get an
-	 * exact number - but we'll also disable interrupts
-	 * and create a window of failure where a new IPI gets
-	 * queued after we test the depth but before we re-enable
-	 * interrupts. So long as IXMT never gets set, however,
-	 * we should be OK: If we pick up something and dispatch
-	 * it here, that's great. If we see nothing, but concurrent
-	 * with this operation, another TC sends us an IPI, IXMT
-	 * is clear, and we'll handle it as a real pseudo-interrupt
-	 * and not a pseudo-pseudo interrupt.
-	 */
-	if (IPIQ[smp_processor_id()].depth > 0) {
-		struct smtc_ipi *pipi;
-		extern void self_ipi(struct smtc_ipi *);
-
-		if ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()])) != NULL) {
-			self_ipi(pipi);
-			smtc_cpu_stats[smp_processor_id()].selfipis++;
-		}
-	}
+
+	/*
+	 * Replay any accumulated deferred IPIs. If "Instant Replay"
+	 * is in use, there should never be any.
+	 */
+#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
+	smtc_ipi_replay();
+#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
 }
 
 void smtc_soft_dump(void)
include/asm-mips/irqflags.h
@@ -15,6 +15,27 @@
 
 #include <asm/hazards.h>
 
+/*
+ * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred IPIs,
+ * at the cost of branch and call overhead on each local_irq_restore()
+ */
+
+#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
+
+extern void smtc_ipi_replay(void);
+
+#define irq_restore_epilog(flags)				\
+do {								\
+	if (!(flags & 0x0400))					\
+		smtc_ipi_replay();				\
+} while (0)
+
+#else
+
+#define irq_restore_epilog(ignore) do { } while (0)
+
+#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
+
 __asm__ (
 	"	.macro	raw_local_irq_enable			\n"
 	"	.set	push					\n"
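A note on the magic number: in the SMTC variant of these macros the
saved flags hold the TCStatus register value, and 0x0400 is bit 10 of
TCStatus, the IXMT (interrupt exempt) bit. The epilog therefore calls
smtc_ipi_replay() only when the flags being restored actually leave
interrupts enabled; restoring an interrupt-inhibited state keeps the
IPIs deferred, exactly as the queueing logic requires.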
@@ -193,6 +214,7 @@ do { \
 		: "=r" (__tmp1)					\
 		: "0" (flags)					\
 		: "memory");					\
+	irq_restore_epilog(flags);				\
 } while(0)
 
 static inline int raw_irqs_disabled_flags(unsigned long flags)
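Putting the two irqflags.h hunks together: the restore path first
rewrites the status register from the saved flags, then runs the
epilog, which replays only if the restored flags leave interrupts
enabled. A runnable C paraphrase of that ordering (the register write
is faked with a variable; all names are illustrative, and the real
restore sequence is the inline asm above):

#include <stdio.h>

#define IXMT 0x0400UL

static unsigned long status;		/* stands in for CP0 TCStatus */

static void smtc_ipi_replay_stub(void)
{
	printf("replaying deferred IPIs\n");
}

#define irq_restore_epilog(flags)		\
do {						\
	if (!((flags) & IXMT))			\
		smtc_ipi_replay_stub();		\
} while (0)

/* paraphrase: restore the saved flags, then run the epilog */
#define local_irq_restore_sketch(flags)		\
do {						\
	status = (flags);			\
	irq_restore_epilog(flags);		\
} while (0)

int main(void)
{
	local_irq_restore_sketch(0x0400UL);	/* still exempt: no replay */
	local_irq_restore_sketch(0x0000UL);	/* re-enabled: replay runs */
	return 0;
}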