linux/arch/powerpc/platforms/cell/pervasive.c
Benjamin Herrenschmidt, commit be2cf20a5a ("powerpc: More fixes for lazy IRQ vs. idle")
Looks like we still have issues with pSeries and Cell idle code
vs. the lazy irq state. In fact, the reset fixes that went upstream
are exposing the problem more by causing BUG_ON() to trigger (which
this patch turns into a WARN_ON instead).

We need to be careful when using a variant of low power state that
has the side effect of turning interrupts back on: all the SW & lazy
state must be set up to look as if everything is enabled before we
enter the low power state with MSR:EE off, since we will return with
MSR:EE on. If not, we have a discrepancy of state which can cause
things to go very wrong later on.

This patch moves the logic into a helper and uses it from the
pseries and cell idle code. The power4/970 idle code already got
things right (in assembly, even!), so I'm not touching it. The power7
"bare metal" idle code is subtly different and correct. That leaves
PA6T and some hypervisor based Cell platforms, which have questionable
code in there, but they are mostly dead platforms, so I'll fix them
when I manage to get final answers from the respective maintainers
about how the low power state actually works on them.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
CC: stable@vger.kernel.org [v3.4]
2012-07-10 19:16:07 +10:00
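
The helper referred to above is prep_irq_for_idle(), which cbe_power_save() below calls before switching the thread off. The following is a minimal sketch of the logic such a helper needs, assuming the v3.x-era lazy-irq PACA fields (irq_happened, soft_enabled); it is an illustration under those assumptions, not necessarily the exact upstream implementation.

#include <linux/irqflags.h>	/* trace_hardirqs_on() */
#include <linux/types.h>
#include <asm/hw_irq.h>		/* hard_irq_disable(), lazy_irq_pending(), PACA_IRQ_HARD_DIS */
#include <asm/paca.h>		/* local_paca */

/*
 * Sketch only (hypothetical name). Called with interrupts
 * soft-disabled, as ppc_md.power_save is. Returns true if it is safe
 * to enter a low power state that will come back with MSR:EE on,
 * false if the caller should just return instead.
 */
static bool prep_irq_for_idle_sketch(void)
{
	/*
	 * Hard-disable first so nothing can slip in between the
	 * check below and the actual low power entry.
	 */
	hard_irq_disable();

	/*
	 * If an interrupt was caught while we were soft-disabled,
	 * don't enter the low power state: bail out so it gets
	 * replayed on the normal path.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep that interrupts are about to be re-enabled */
	trace_hardirqs_on();

	/*
	 * Make the SW and lazy state look fully enabled, matching the
	 * hard-enabled state we will wake up in.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
	local_paca->soft_enabled = 1;

	return true;
}

In cbe_power_save() below, the real helper is used exactly this way: if it returns false the function simply returns, otherwise the thread is switched off knowing the lazy state already matches the MSR:EE-on state it will wake up in.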


/*
 * CBE Pervasive Monitor and Debug
 *
 * (C) Copyright IBM Corporation 2005
 *
 * Authors: Maximino Aguilar (maguilar@us.ibm.com)
 *          Michael N. Day (mnday@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/kallsyms.h>

#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/pgtable.h>
#include <asm/reg.h>
#include <asm/cell-regs.h>

#include "pervasive.h"

static void cbe_power_save(void)
{
	unsigned long ctrl, thread_switch_control;

	/* Ensure our interrupt state is properly tracked */
	if (!prep_irq_for_idle())
		return;

	ctrl = mfspr(SPRN_CTRLF);

	/* Enable DEC and EE interrupt request */
	thread_switch_control = mfspr(SPRN_TSC_CELL);
	thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST;

	switch (ctrl & CTRL_CT) {
	case CTRL_CT0:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
		break;
	case CTRL_CT1:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_1;
		break;
	default:
		printk(KERN_WARNING "%s: unknown configuration\n",
		       __func__);
		break;
	}
	mtspr(SPRN_TSC_CELL, thread_switch_control);

	/*
	 * go into low thread priority, medium priority will be
	 * restored for us after wake-up.
	 */
	HMT_low();

	/*
	 * atomically disable thread execution and runlatch.
	 * External and Decrementer exceptions are still handled when the
	 * thread is disabled but now enter in cbe_system_reset_exception()
	 */
	ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
	mtspr(SPRN_CTRLT, ctrl);

	/* Re-enable interrupts in MSR */
	__hard_irq_enable();
}

static int cbe_system_reset_exception(struct pt_regs *regs)
{
	switch (regs->msr & SRR1_WAKEMASK) {
	case SRR1_WAKEEE:
		do_IRQ(regs);
		break;
	case SRR1_WAKEDEC:
		timer_interrupt(regs);
		break;
	case SRR1_WAKEMT:
		return cbe_sysreset_hack();
#ifdef CONFIG_CBE_RAS
	case SRR1_WAKESYSERR:
		cbe_system_error_exception(regs);
		break;
	case SRR1_WAKETHERM:
		cbe_thermal_exception(regs);
		break;
#endif /* CONFIG_CBE_RAS */
	default:
		/* do system reset */
		return 0;
	}
	/* everything handled */
	return 1;
}

void __init cbe_pervasive_init(void)
{
	int cpu;

	if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
		return;

	for_each_possible_cpu(cpu) {
		struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(cpu);
		if (!regs)
			continue;

		/* Enable Pause(0) control bit */
		out_be64(&regs->pmcr, in_be64(&regs->pmcr) |
				      CBE_PMD_PAUSE_ZERO_CONTROL);
	}

	ppc_md.power_save = cbe_power_save;
	ppc_md.system_reset_exception = cbe_system_reset_exception;
}