powerpc: Unify the 32 and 64 bit idle loops
This unifies the 32-bit (ARCH=ppc and ARCH=powerpc) and 64-bit idle loops. It brings over the concept of having a ppc_md.power_save function from 32-bit to ARCH=powerpc, which lets us get rid of native_idle(). With this we will also be able to simplify the idle handling for pSeries and cell.

Signed-off-by: Paul Mackerras <paulus@samba.org>
commit a0652fc9a2
parent 55aab8cd3a
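The shape of the change is easy to see outside the kernel: the generic idle loop consults an optional platform hook instead of each platform supplying its own loop. The sketch below is a minimal, self-contained userspace illustration of that idea only; the trimmed machdep_calls struct mirrors the two hooks touched by this patch, while need_resched(), demo_power_save(), pending_work and the iteration bound are invented stand-ins, not kernel code.

/*
 * Hedged sketch only: a generic idle loop that prefers an optional
 * platform power_save hook and falls back to spinning.  Everything
 * besides the two hooks (need_resched stub, demo_power_save,
 * pending_work, the iteration bound) is invented for the example.
 */
#include <stdbool.h>
#include <stdio.h>

struct machdep_calls {
	void (*idle_loop)(void);	/* whole replacement loop, may be NULL */
	void (*power_save)(void);	/* low-power wait for work, may be NULL */
};

static struct machdep_calls ppc_md;
static int pending_work;		/* stand-in for the scheduler's state */

static bool need_resched(void)
{
	return pending_work > 0;
}

/* Example platform hook, playing the role of power4_idle()/ppc6xx_idle(). */
static void demo_power_save(void)
{
	printf("power_save: waiting for work in low power\n");
	pending_work++;			/* pretend an interrupt delivered work */
}

/* Simplified analogue of the unified cpu_idle() introduced by this patch. */
static void cpu_idle(int iterations)
{
	if (ppc_md.idle_loop) {
		ppc_md.idle_loop();	/* platform supplied the whole loop */
		return;
	}

	for (int i = 0; i < iterations; i++) {
		while (!need_resched()) {
			if (ppc_md.power_save)
				ppc_md.power_save();
			/* else: spin at low thread priority (HMT_low) */
		}
		printf("schedule(): running the pending work\n");
		pending_work = 0;
	}
}

int main(void)
{
	/* Platform setup, like maple/pmac now setting .power_save = power4_idle. */
	ppc_md.power_save = demo_power_save;
	cpu_idle(3);
	return 0;
}

The real patch does more around the power_save call: it clears TIF_POLLING_NRFLAG, disables interrupts and re-checks need_resched()/cpu_should_die() before sleeping, so a wakeup cannot race with entry into NAP; the sketch leaves that out.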
@@ -12,12 +12,12 @@ endif
 obj-y := semaphore.o cputable.o ptrace.o syscalls.o \
          irq.o align.o signal_32.o pmc.o vdso.o \
-         init_task.o process.o systbl.o
+         init_task.o process.o systbl.o idle.o
 obj-y += vdso32/
 obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \
          signal_64.o ptrace32.o \
          paca.o cpu_setup_power4.o \
-         firmware.o sysfs.o idle_64.o
+         firmware.o sysfs.o
 obj-$(CONFIG_PPC64) += vdso64/
 obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
 obj-$(CONFIG_POWER4) += idle_power4.o
@@ -34,6 +34,7 @@ obj-$(CONFIG_IBMEBUS) += ibmebus.o
 obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
 obj64-$(CONFIG_PPC_MULTIPLATFORM) += nvram_64.o
 obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+obj-$(CONFIG_6xx) += idle_6xx.o
 
 ifeq ($(CONFIG_PPC_MERGE),y)
 
@@ -51,7 +52,6 @@ obj-$(CONFIG_PPC64) += misc_64.o dma_64.o iommu.o
 obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o
 obj-$(CONFIG_MODULES) += ppc_ksyms.o
 obj-$(CONFIG_BOOTX_TEXT) += btext.o
-obj-$(CONFIG_6xx) += idle_6xx.o
 obj-$(CONFIG_SMP) += smp.o
 obj-$(CONFIG_KPROBES) += kprobes.o
 obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o
@@ -135,10 +135,10 @@ transfer_to_handler:
 	mfspr	r11,SPRN_HID0
 	mtcr	r11
 BEGIN_FTR_SECTION
-	bt-	8,power_save_6xx_restore	/* Check DOZE */
+	bt-	8,4f			/* Check DOZE */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
 BEGIN_FTR_SECTION
-	bt-	9,power_save_6xx_restore	/* Check NAP */
+	bt-	9,4f			/* Check NAP */
 END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
 #endif /* CONFIG_6xx */
 	.globl transfer_to_handler_cont
@@ -157,6 +157,10 @@ transfer_to_handler_cont:
 	SYNC
 	RFI				/* jump to handler, enable MMU */
 
+#ifdef CONFIG_6xx
+4:	b	power_save_6xx_restore
+#endif
+
 /*
  * On kernel stack overflow, load up an initial stack pointer
  * and call StackOverflow(regs), which should not return.
@@ -2,13 +2,17 @@
  * Idle daemon for PowerPC. Idle daemon will handle any action
  * that needs to be taken when the system becomes idle.
  *
- * Originally Written by Cort Dougan (cort@cs.nmt.edu)
+ * Originally written by Cort Dougan (cort@cs.nmt.edu).
+ * Subsequent 32-bit hacking by Tom Rini, Armin Kuster,
+ * Paul Mackerras and others.
  *
  * iSeries supported added by Mike Corrigan <mikejc@us.ibm.com>
  *
  * Additional shared processor, SMT, and firmware support
  * Copyright (c) 2003 Dave Engebretsen <engebret@us.ibm.com>
  *
+ * 32-bit and 64-bit versions merged by Paul Mackerras <paulus@samba.org>
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
@@ -29,18 +33,43 @@
 #include <asm/machdep.h>
 #include <asm/smp.h>
 
-extern void power4_idle(void);
+#ifdef CONFIG_HOTPLUG_CPU
+#define cpu_should_die()	(cpu_is_offline(smp_processor_id()) && \
+				 system_state == SYSTEM_RUNNING)
+#else
+#define cpu_should_die()	0
+#endif
 
-void default_idle(void)
+/*
+ * The body of the idle task.
+ */
+void cpu_idle(void)
 {
-	unsigned int cpu = smp_processor_id();
+	if (ppc_md.idle_loop)
+		ppc_md.idle_loop();	/* doesn't return */
 
 	set_thread_flag(TIF_POLLING_NRFLAG);
-
 	while (1) {
-		if (!need_resched()) {
-			while (!need_resched() && !cpu_is_offline(cpu)) {
-				ppc64_runlatch_off();
+		ppc64_runlatch_off();
+
+		while (!need_resched() && !cpu_should_die()) {
+			if (ppc_md.power_save) {
+				clear_thread_flag(TIF_POLLING_NRFLAG);
+				/*
+				 * smp_mb is so clearing of TIF_POLLING_NRFLAG
+				 * is ordered w.r.t. need_resched() test.
+				 */
+				smp_mb();
+				local_irq_disable();
+
+				/* check again after disabling irqs */
+				if (!need_resched() && !cpu_should_die())
+					ppc_md.power_save();
+
+				local_irq_enable();
+				set_thread_flag(TIF_POLLING_NRFLAG);
+
+			} else {
 				/*
 				 * Go into low thread priority and possibly
 				 * low power mode.
@@ -48,46 +77,18 @@ void default_idle(void)
 				HMT_low();
 				HMT_very_low();
 			}
-
-			HMT_medium();
 		}
 
+		HMT_medium();
 		ppc64_runlatch_on();
+		if (cpu_should_die())
+			cpu_die();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
-		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
-			cpu_die();
 	}
 }
 
-void native_idle(void)
-{
-	while (1) {
-		ppc64_runlatch_off();
-
-		if (!need_resched())
-			power4_idle();
-
-		if (need_resched()) {
-			ppc64_runlatch_on();
-			preempt_enable_no_resched();
-			schedule();
-			preempt_disable();
-		}
-
-		if (cpu_is_offline(smp_processor_id()) &&
-		    system_state == SYSTEM_RUNNING)
-			cpu_die();
-	}
-}
-
-void cpu_idle(void)
-{
-	BUG_ON(NULL == ppc_md.idle_loop);
-	ppc_md.idle_loop();
-}
-
 int powersave_nap;
 
 #ifdef CONFIG_SYSCTL
@@ -87,19 +87,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
 	cmpwi	0,r3,0
 	beqlr
 
-	/* Clear MSR:EE */
-	mfmsr	r7
-	rlwinm	r0,r7,0,17,15
-	mtmsr	r0
-
-	/* Check current_thread_info()->flags */
-	rlwinm	r4,r1,0,0,18
-	lwz	r4,TI_FLAGS(r4)
-	andi.	r0,r4,_TIF_NEED_RESCHED
-	beq	1f
-	mtmsr	r7	/* out of line this ? */
-	blr
-1:
 	/* Some pre-nap cleanups needed on some CPUs */
 	andis.	r0,r3,HID0_NAP@h
 	beq	2f
@@ -220,8 +207,6 @@ _GLOBAL(nap_save_msscr0)
 _GLOBAL(nap_save_hid1)
 	.space	4*NR_CPUS
 
-_GLOBAL(powersave_nap)
-	.long	0
 _GLOBAL(powersave_lowspeed)
 	.long	0
 
@@ -49,21 +49,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
 	cmpwi	0,r4,0
 	beqlr
 
-	/* Clear MSR:EE */
-	mfmsr	r7
-	li	r4,0
-	ori	r4,r4,MSR_EE
-	andc	r0,r7,r4
-	mtmsrd	r0
-
-	/* Check current_thread_info()->flags */
-	clrrdi	r4,r1,THREAD_SHIFT
-	ld	r4,TI_FLAGS(r4)
-	andi.	r0,r4,_TIF_NEED_RESCHED
-	beq	1f
-	mtmsrd	r7	/* out of line this ? */
-	blr
-1:
 	/* Go to NAP now */
 BEGIN_FTR_SECTION
 	DSSALL
@@ -53,9 +53,6 @@
 extern void platform_init(void);
 extern void bootx_init(unsigned long r4, unsigned long phys);
 
-extern void ppc6xx_idle(void);
-extern void power4_idle(void);
-
 boot_infos_t *boot_infos;
 struct ide_machdep_calls ppc_ide_md;
 
@@ -194,7 +191,9 @@ void __init machine_init(unsigned long dt_ptr, unsigned long phys)
 	platform_init();
 
 #ifdef CONFIG_6xx
-	ppc_md.power_save = ppc6xx_idle;
+	if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
+	    cpu_has_feature(CPU_FTR_CAN_NAP))
+		ppc_md.power_save = ppc6xx_idle;
 #endif
 
 	if (ppc_md.progress)
@@ -607,12 +607,6 @@ void __init setup_arch(char **cmdline_p)
 
 	ppc_md.setup_arch();
 
-	/* Use the default idle loop if the platform hasn't provided one. */
-	if (NULL == ppc_md.idle_loop) {
-		ppc_md.idle_loop = default_idle;
-		printk(KERN_INFO "Using default idle loop\n");
-	}
-
 	paging_init();
 	ppc64_boot_msg(0x15, "Setup Done");
 }
@@ -290,7 +290,7 @@ struct machdep_calls __initdata maple_md = {
 	.get_rtc_time = maple_get_rtc_time,
 	.calibrate_decr = generic_calibrate_decr,
 	.progress = maple_progress,
-	.idle_loop = native_idle,
+	.power_save = power4_idle,
 #ifdef CONFIG_KEXEC
 	.machine_kexec = default_machine_kexec,
 	.machine_kexec_prepare = default_machine_kexec_prepare,
@@ -733,7 +733,7 @@ struct machdep_calls __initdata pmac_md = {
 	.progress = udbg_progress,
 #ifdef CONFIG_PPC64
 	.pci_probe_mode = pmac_pci_probe_mode,
-	.idle_loop = native_idle,
+	.power_save = power4_idle,
 	.enable_pmcs = power4_enable_pmcs,
 #ifdef CONFIG_KEXEC
 	.machine_kexec = default_machine_kexec,
@@ -59,8 +59,6 @@ head-$(CONFIG_4xx) := arch/ppc/kernel/head_4xx.o
 head-$(CONFIG_44x) := arch/ppc/kernel/head_44x.o
 head-$(CONFIG_FSL_BOOKE) := arch/ppc/kernel/head_fsl_booke.o
 
-head-$(CONFIG_6xx) += arch/ppc/kernel/idle_6xx.o
-head-$(CONFIG_POWER4) += arch/ppc/kernel/idle_power4.o
 head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o
 
 core-y += arch/ppc/kernel/ arch/powerpc/kernel/ \
@@ -8,10 +8,9 @@ extra-$(CONFIG_40x) := head_4xx.o
 extra-$(CONFIG_44x) := head_44x.o
 extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
 extra-$(CONFIG_8xx) := head_8xx.o
-extra-$(CONFIG_6xx) += idle_6xx.o
 extra-y += vmlinux.lds
 
-obj-y := entry.o traps.o idle.o time.o misc.o \
+obj-y := entry.o traps.o time.o misc.o \
          setup.o \
          ppc_htab.o
 obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
@@ -35,7 +34,6 @@ endif
 # These are here while we do the architecture merge
 
 else
-obj-y := idle.o
 obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
 obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
 obj-$(CONFIG_MODULES) += module.o
@@ -135,10 +135,10 @@ transfer_to_handler:
 	mfspr	r11,SPRN_HID0
 	mtcr	r11
 BEGIN_FTR_SECTION
-	bt-	8,power_save_6xx_restore	/* Check DOZE */
+	bt-	8,4f			/* Check DOZE */
 END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
 BEGIN_FTR_SECTION
-	bt-	9,power_save_6xx_restore	/* Check NAP */
+	bt-	9,4f			/* Check NAP */
 END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
 #endif /* CONFIG_6xx */
 	.globl transfer_to_handler_cont
@@ -157,6 +157,10 @@ transfer_to_handler_cont:
 	SYNC
 	RFI				/* jump to handler, enable MMU */
 
+#ifdef CONFIG_6xx
+4:	b	power_save_6xx_restore
+#endif
+
 /*
  * On kernel stack overflow, load up an initial stack pointer
  * and call StackOverflow(regs), which should not return.
@@ -1,112 +0,0 @@
-/*
- * Idle daemon for PowerPC. Idle daemon will handle any action
- * that needs to be taken when the system becomes idle.
- *
- * Written by Cort Dougan (cort@cs.nmt.edu). Subsequently hacked
- * on by Tom Rini, Armin Kuster, Paul Mackerras and others.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/config.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/sysctl.h>
-#include <linux/cpu.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/mmu.h>
-#include <asm/cache.h>
-#include <asm/cputable.h>
-#include <asm/machdep.h>
-#include <asm/smp.h>
-
-void default_idle(void)
-{
-	void (*powersave)(void);
-
-	powersave = ppc_md.power_save;
-
-	if (!need_resched()) {
-		if (powersave != NULL)
-			powersave();
-#ifdef CONFIG_SMP
-		else {
-			set_thread_flag(TIF_POLLING_NRFLAG);
-			while (!need_resched() &&
-			       !cpu_is_offline(smp_processor_id()))
-				barrier();
-			clear_thread_flag(TIF_POLLING_NRFLAG);
-		}
-#endif
-	}
-}
-
-/*
- * The body of the idle task.
- */
-void cpu_idle(void)
-{
-	int cpu = smp_processor_id();
-
-	for (;;) {
-		while (!need_resched()) {
-			if (ppc_md.idle != NULL)
-				ppc_md.idle();
-			else
-				default_idle();
-		}
-
-		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
-			cpu_die();
-		preempt_enable_no_resched();
-		schedule();
-		preempt_disable();
-	}
-}
-
-#if defined(CONFIG_SYSCTL) && defined(CONFIG_6xx)
-/*
- * Register the sysctl to set/clear powersave_nap.
- */
-extern int powersave_nap;
-
-static ctl_table powersave_nap_ctl_table[]={
-	{
-		.ctl_name	= KERN_PPC_POWERSAVE_NAP,
-		.procname	= "powersave-nap",
-		.data		= &powersave_nap,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec,
-	},
-	{ 0, },
-};
-static ctl_table powersave_nap_sysctl_root[] = {
-	{ 1, "kernel", NULL, 0, 0755, powersave_nap_ctl_table, },
-	{ 0,},
-};
-
-static int __init
-register_powersave_nap_sysctl(void)
-{
-	register_sysctl_table(powersave_nap_sysctl_root, 0);
-
-	return 0;
-}
-
-__initcall(register_powersave_nap_sysctl);
-#endif
@@ -1,233 +0,0 @@
-/*
- * This file contains the power_save function for 6xx & 7xxx CPUs
- * rewritten in assembler
- *
- * Warning ! This code assumes that if your machine has a 750fx
- * it will have PLL 1 set to low speed mode (used during NAP/DOZE).
- * if this is not the case some additional changes will have to
- * be done to check a runtime var (a bit like powersave-nap)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/threads.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/cputable.h>
-#include <asm/thread_info.h>
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-
-#undef DEBUG
-
-	.text
-
-/*
- * Init idle, called at early CPU setup time from head.S for each CPU
- * Make sure no rest of NAP mode remains in HID0, save default
- * values for some CPU specific registers. Called with r24
- * containing CPU number and r3 reloc offset
- */
-_GLOBAL(init_idle_6xx)
-BEGIN_FTR_SECTION
-	mfspr	r4,SPRN_HID0
-	rlwinm	r4,r4,0,10,8	/* Clear NAP */
-	mtspr	SPRN_HID0, r4
-	b	1f
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
-	blr
-1:
-	slwi	r5,r24,2
-	add	r5,r5,r3
-BEGIN_FTR_SECTION
-	mfspr	r4,SPRN_MSSCR0
-	addis	r6,r5,nap_save_msscr0@ha
-	stw	r4,nap_save_msscr0@l(r6)
-END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
-BEGIN_FTR_SECTION
-	mfspr	r4,SPRN_HID1
-	addis	r6,r5,nap_save_hid1@ha
-	stw	r4,nap_save_hid1@l(r6)
-END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
-	blr
-
-/*
- * Here is the power_save_6xx function. This could eventually be
- * split into several functions & changing the function pointer
- * depending on the various features.
- */
-_GLOBAL(ppc6xx_idle)
-	/* Check if we can nap or doze, put HID0 mask in r3
-	 */
-	lis	r3,0
-BEGIN_FTR_SECTION
-	lis	r3,HID0_DOZE@h
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
-BEGIN_FTR_SECTION
-	/* We must dynamically check for the NAP feature as it
-	 * can be cleared by CPU init after the fixups are done
-	 */
-	lis	r4,cur_cpu_spec@ha
-	lwz	r4,cur_cpu_spec@l(r4)
-	lwz	r4,CPU_SPEC_FEATURES(r4)
-	andi.	r0,r4,CPU_FTR_CAN_NAP
-	beq	1f
-	/* Now check if user or arch enabled NAP mode */
-	lis	r4,powersave_nap@ha
-	lwz	r4,powersave_nap@l(r4)
-	cmpwi	0,r4,0
-	beq	1f
-	lis	r3,HID0_NAP@h
-1:
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
-	cmpwi	0,r3,0
-	beqlr
-
-	/* Clear MSR:EE */
-	mfmsr	r7
-	rlwinm	r0,r7,0,17,15
-	mtmsr	r0
-
-	/* Check current_thread_info()->flags */
-	rlwinm	r4,r1,0,0,18
-	lwz	r4,TI_FLAGS(r4)
-	andi.	r0,r4,_TIF_NEED_RESCHED
-	beq	1f
-	mtmsr	r7	/* out of line this ? */
-	blr
-1:
-	/* Some pre-nap cleanups needed on some CPUs */
-	andis.	r0,r3,HID0_NAP@h
-	beq	2f
-BEGIN_FTR_SECTION
-	/* Disable L2 prefetch on some 745x and try to ensure
-	 * L2 prefetch engines are idle. As explained by errata
-	 * text, we can't be sure they are, we just hope very hard
-	 * that well be enough (sic !). At least I noticed Apple
-	 * doesn't even bother doing the dcbf's here...
-	 */
-	mfspr	r4,SPRN_MSSCR0
-	rlwinm	r4,r4,0,0,29
-	sync
-	mtspr	SPRN_MSSCR0,r4
-	sync
-	isync
-	lis	r4,KERNELBASE@h
-	dcbf	0,r4
-	dcbf	0,r4
-	dcbf	0,r4
-	dcbf	0,r4
-END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
-#ifdef DEBUG
-	lis	r6,nap_enter_count@ha
-	lwz	r4,nap_enter_count@l(r6)
-	addi	r4,r4,1
-	stw	r4,nap_enter_count@l(r6)
-#endif
-2:
-BEGIN_FTR_SECTION
-	/* Go to low speed mode on some 750FX */
-	lis	r4,powersave_lowspeed@ha
-	lwz	r4,powersave_lowspeed@l(r4)
-	cmpwi	0,r4,0
-	beq	1f
-	mfspr	r4,SPRN_HID1
-	oris	r4,r4,0x0001
-	mtspr	SPRN_HID1,r4
-1:
-END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
-
-	/* Go to NAP or DOZE now */
-	mfspr	r4,SPRN_HID0
-	lis	r5,(HID0_NAP|HID0_SLEEP)@h
-BEGIN_FTR_SECTION
-	oris	r5,r5,HID0_DOZE@h
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
-	andc	r4,r4,r5
-	or	r4,r4,r3
-BEGIN_FTR_SECTION
-	oris	r4,r4,HID0_DPM@h	/* that should be done once for all */
-END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
-	mtspr	SPRN_HID0,r4
-BEGIN_FTR_SECTION
-	DSSALL
-	sync
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-	ori	r7,r7,MSR_EE	/* Could be ommited (already set) */
-	oris	r7,r7,MSR_POW@h
-	sync
-	isync
-	mtmsr	r7
-	isync
-	sync
-	blr
-
-/*
- * Return from NAP/DOZE mode, restore some CPU specific registers,
- * we are called with DR/IR still off and r2 containing physical
- * address of current.
- */
-_GLOBAL(power_save_6xx_restore)
-	mfspr	r11,SPRN_HID0
-	rlwinm.	r11,r11,0,10,8	/* Clear NAP & copy NAP bit !state to cr1 EQ */
-	cror	4*cr1+eq,4*cr0+eq,4*cr0+eq
-BEGIN_FTR_SECTION
-	rlwinm	r11,r11,0,9,7	/* Clear DOZE */
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
-	mtspr	SPRN_HID0, r11
-
-#ifdef DEBUG
-	beq	cr1,1f
-	lis	r11,(nap_return_count-KERNELBASE)@ha
-	lwz	r9,nap_return_count@l(r11)
-	addi	r9,r9,1
-	stw	r9,nap_return_count@l(r11)
-1:
-#endif
-
-	rlwinm	r9,r1,0,0,18
-	tophys(r9,r9)
-	lwz	r11,TI_CPU(r9)
-	slwi	r11,r11,2
-	/* Todo make sure all these are in the same page
-	 * and load r22 (@ha part + CPU offset) only once
-	 */
-BEGIN_FTR_SECTION
-	beq	cr1,1f
-	addis	r9,r11,(nap_save_msscr0-KERNELBASE)@ha
-	lwz	r9,nap_save_msscr0@l(r9)
-	mtspr	SPRN_MSSCR0, r9
-	sync
-	isync
-1:
-END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
-BEGIN_FTR_SECTION
-	addis	r9,r11,(nap_save_hid1-KERNELBASE)@ha
-	lwz	r9,nap_save_hid1@l(r9)
-	mtspr	SPRN_HID1, r9
-END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
-	b	transfer_to_handler_cont
-
-	.data
-
-_GLOBAL(nap_save_msscr0)
-	.space	4*NR_CPUS
-
-_GLOBAL(nap_save_hid1)
-	.space	4*NR_CPUS
-
-_GLOBAL(powersave_nap)
-	.long	0
-_GLOBAL(powersave_lowspeed)
-	.long	0
-
-#ifdef DEBUG
-_GLOBAL(nap_enter_count)
-	.space	4
-_GLOBAL(nap_return_count)
-	.space	4
-#endif
@@ -1,91 +0,0 @@
-/*
- * This file contains the power_save function for 6xx & 7xxx CPUs
- * rewritten in assembler
- *
- * Warning ! This code assumes that if your machine has a 750fx
- * it will have PLL 1 set to low speed mode (used during NAP/DOZE).
- * if this is not the case some additional changes will have to
- * be done to check a runtime var (a bit like powersave-nap)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/threads.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/cputable.h>
-#include <asm/thread_info.h>
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-
-#undef DEBUG
-
-	.text
-
-/*
- * Init idle, called at early CPU setup time from head.S for each CPU
- * So nothing for now. Called with r24 containing CPU number and r3
- * reloc offset
- */
-	.globl	init_idle_power4
-init_idle_power4:
-	blr
-
-/*
- * Here is the power_save_6xx function. This could eventually be
- * split into several functions & changing the function pointer
- * depending on the various features.
- */
-	.globl	power4_idle
-power4_idle:
-BEGIN_FTR_SECTION
-	blr
-END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
-	/* We must dynamically check for the NAP feature as it
-	 * can be cleared by CPU init after the fixups are done
-	 */
-	lis	r4,cur_cpu_spec@ha
-	lwz	r4,cur_cpu_spec@l(r4)
-	lwz	r4,CPU_SPEC_FEATURES(r4)
-	andi.	r0,r4,CPU_FTR_CAN_NAP
-	beqlr
-	/* Now check if user or arch enabled NAP mode */
-	lis	r4,powersave_nap@ha
-	lwz	r4,powersave_nap@l(r4)
-	cmpwi	0,r4,0
-	beqlr
-
-	/* Clear MSR:EE */
-	mfmsr	r7
-	rlwinm	r0,r7,0,17,15
-	mtmsr	r0
-
-	/* Check current_thread_info()->flags */
-	rlwinm	r4,r1,0,0,18
-	lwz	r4,TI_FLAGS(r4)
-	andi.	r0,r4,_TIF_NEED_RESCHED
-	beq	1f
-	mtmsr	r7	/* out of line this ? */
-	blr
-1:
-	/* Go to NAP now */
-BEGIN_FTR_SECTION
-	DSSALL
-	sync
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-	ori	r7,r7,MSR_EE	/* Could be ommited (already set) */
-	oris	r7,r7,MSR_POW@h
-	sync
-	isync
-	mtmsr	r7
-	isync
-	sync
-	blr
-
-	.globl	powersave_nap
-powersave_nap:
-	.long	0
@@ -158,6 +158,12 @@ struct machdep_calls {
 	/* Idle loop for this platform, leave empty for default idle loop */
 	void		(*idle_loop)(void);
 
+	/*
+	 * Function for waiting for work with reduced power in idle loop;
+	 * called with interrupts disabled.
+	 */
+	void		(*power_save)(void);
+
 	/* Function to enable performance monitor counters for this
 	   platform, called once per cpu. */
 	void		(*enable_pmcs)(void);
@@ -170,9 +176,6 @@ struct machdep_calls {
 	   May be NULL. */
 	void		(*init)(void);
 
-	void		(*idle)(void);
-	void		(*power_save)(void);
-
 	void		(*heartbeat)(void);
 	unsigned long	heartbeat_reset;
 	unsigned long	heartbeat_count;
@@ -242,8 +245,8 @@ struct machdep_calls {
 #endif /* CONFIG_KEXEC */
 };
 
-extern void default_idle(void);
-extern void native_idle(void);
+extern void power4_idle(void);
+extern void ppc6xx_idle(void);
 
 extern struct machdep_calls ppc_md;
 extern char cmd_line[COMMAND_LINE_SIZE];
@@ -622,6 +622,10 @@ extern void ppc64_runlatch_off(void);
 extern unsigned long scom970_read(unsigned int address);
 extern void scom970_write(unsigned int address, unsigned long value);
 
+#else
+#define ppc64_runlatch_on()
+#define ppc64_runlatch_off()
+
 #endif /* CONFIG_PPC64 */
 
 #define __get_SP()	({unsigned long sp; \
@@ -44,7 +44,7 @@ struct machdep_calls {
 	void		(*power_off)(void);
 	void		(*halt)(void);
 
-	void		(*idle)(void);
+	void		(*idle_loop)(void);
 	void		(*power_save)(void);
 
 	long		(*time_init)(void); /* Optional, may be NULL */