commit 8a25a2fd12

This moves the 'cpu sysdev_class' over to a regular 'cpu' subsystem and
converts the devices to regular devices. The sysdev drivers are implemented
as subsystem interfaces now.

After all sysdev classes are ported to regular driver core entities, the
sysdev implementation will be entirely removed from the kernel.

Userspace relies on events and generic sysfs subsystem infrastructure from
sysdev devices, which are made available with this conversion.

Cc: Haavard Skinnemoen <hskinnemoen@gmail.com>
Cc: Hans-Christian Egtvedt <egtvedt@samfundet.no>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Borislav Petkov <bp@amd64.org>
Cc: Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
Cc: Len Brown <lenb@kernel.org>
Cc: Zhang Rui <rui.zhang@intel.com>
Cc: Dave Jones <davej@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <rmk+kernel@arm.linux.org.uk>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
Signed-off-by: Kay Sievers <kay.sievers@vrfy.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
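
As a rough illustration of what "sysdev drivers are implemented as subsystem
interfaces" means in practice, a former sysdev driver now hangs off the
regular 'cpu' bus via struct subsys_interface. The following is a minimal,
hypothetical sketch (the "example_*" names are made up and this code is not
part of the commit):

#include <linux/cpu.h>
#include <linux/device.h>

/* Hypothetical callback, invoked for each device on the cpu subsystem. */
static int example_add_dev(struct device *dev, struct subsys_interface *sif)
{
	return 0;
}

static struct subsys_interface example_interface = {
	.name		= "example",
	.subsys		= &cpu_subsys,
	.add_dev	= example_add_dev,
};

/* Registering the interface walks the subsystem's existing devices and
 * calls example_add_dev() for each of them:
 *
 *	subsys_interface_register(&example_interface);
 */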
/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

/*
 * cpuidle_disabled - report whether cpuidle has been switched off, either
 * via the "off" module parameter or a call to disable_cpuidle().
 */
int cpuidle_disabled(void)
{
	return off;
}

void disable_cpuidle(void)
{
	off = 1;
}

#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT)
/* Wake all CPUs so they leave the old idle handler and pick up the new one. */
static void cpuidle_kick_cpus(void)
{
	cpu_idle_wait();
}
#elif defined(CONFIG_SMP)
# error "Arch needs cpu_idle_wait() equivalent here"
#else /* !CONFIG_ARCH_HAS_CPU_IDLE_WAIT && !CONFIG_SMP */
static void cpuidle_kick_cpus(void) {}
#endif

static int __cpuidle_register_device(struct cpuidle_device *dev);

/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * return non-zero on failure
 */
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_driver();
	struct cpuidle_state *target_state;
	int next_state, entered_state;

	if (off)
		return -ENODEV;

	if (!initialized)
		return -ENODEV;

	/* check if the device is ready */
	if (!dev || !dev->enabled)
		return -EBUSY;

#if 0
	/* shows regressions, re-enable for 2.6.29 */
	/*
	 * run any timers that can be run now, at this point
	 * before calculating the idle duration etc.
	 */
	hrtimer_peek_ahead_timers();
#endif

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(drv, dev);
	if (need_resched()) {
		local_irq_enable();
		return 0;
	}

	target_state = &drv->states[next_state];

	trace_power_start(POWER_CSTATE, next_state, dev->cpu);
	trace_cpu_idle(next_state, dev->cpu);

	entered_state = target_state->enter(dev, drv, next_state);

	trace_power_end(dev->cpu);
	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);

	if (entered_state >= 0) {
		/* Update cpuidle counters */
		/* This can be moved to within driver enter routine
		 * but that results in multiple copies of same code.
		 */
		dev->states_usage[entered_state].time +=
			(unsigned long long)dev->last_residency;
		dev->states_usage[entered_state].usage++;
	}

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, entered_state);

	return 0;
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		cpuidle_kick_cpus();
	}
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
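
/*
 * Typical usage of the pause/resume pair from outside this file (an
 * illustrative sketch, not code taken from this file): callers that enable
 * or disable a device externally are expected to bracket the call:
 *
 *	cpuidle_pause_and_lock();
 *	cpuidle_disable_device(dev);
 *	... reconfigure the device ...
 *	cpuidle_enable_device(dev);
 *	cpuidle_resume_and_unlock();
 */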

#ifdef CONFIG_ARCH_HAS_CPU_RELAX
/* Busy-wait with interrupts enabled until the CPU has something to do. */
static int poll_idle(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	ktime_t t1, t2;
	s64 diff;

	t1 = ktime_get();
	local_irq_enable();
	while (!need_resched())
		cpu_relax();

	t2 = ktime_get();
	diff = ktime_to_us(ktime_sub(t2, t1));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	return index;
}

/* Set up drv->states[0] as the zero-latency polling state. */
static void poll_idle_init(struct cpuidle_driver *drv)
{
	struct cpuidle_state *state = &drv->states[0];

	snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
	state->exit_latency = 0;
	state->target_residency = 0;
	state->power_usage = -1;
	state->flags = 0;
	state->enter = poll_idle;
}
#else
static void poll_idle_init(struct cpuidle_driver *drv) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret, i;

	if (dev->enabled)
		return 0;
	if (!cpuidle_get_driver() || !cpuidle_curr_governor)
		return -EIO;
	if (!dev->state_count)
		return -EINVAL;

	if (dev->registered == 0) {
		ret = __cpuidle_register_device(dev);
		if (ret)
			return ret;
	}

	poll_idle_init(cpuidle_get_driver());

	if ((ret = cpuidle_add_state_sysfs(dev)))
		return ret;

	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(cpuidle_get_driver(), dev)))
		goto fail_sysfs;

	for (i = 0; i < dev->state_count; i++) {
		dev->states_usage[i].usage = 0;
		dev->states_usage[i].time = 0;
	}
	dev->last_residency = 0;

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_state_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	if (!dev->enabled)
		return;
	if (!cpuidle_get_driver() || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(cpuidle_get_driver(), dev);

	cpuidle_remove_state_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct device *cpu_dev;
	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

	if (!dev)
		return -EINVAL;

	cpu_dev = get_cpu_device((unsigned long)dev->cpu);
	if (!try_module_get(cpuidle_driver->owner))
		return -EINVAL;

	init_completion(&dev->kobj_unregister);

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);
	if ((ret = cpuidle_add_sysfs(cpu_dev))) {
		module_put(cpuidle_driver->owner);
		return ret;
	}

	dev->registered = 1;
	return 0;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;

	mutex_lock(&cpuidle_lock);

	if ((ret = __cpuidle_register_device(dev))) {
		mutex_unlock(&cpuidle_lock);
		return ret;
	}

	cpuidle_enable_device(dev);
	cpuidle_install_idle_handler();

	mutex_unlock(&cpuidle_lock);

	return 0;

}

EXPORT_SYMBOL_GPL(cpuidle_register_device);
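
/*
 * Illustrative driver-side usage (a hypothetical sketch, not code from this
 * file): a cpuidle driver typically registers its struct cpuidle_driver
 * first and then one cpuidle_device per CPU, roughly like this
 * ("my_idle_driver" and "my_idle_dev" are made-up names):
 *
 *	static DEFINE_PER_CPU(struct cpuidle_device, my_idle_dev);
 *
 *	ret = cpuidle_register_driver(&my_idle_driver);
 *	if (ret)
 *		return ret;
 *
 *	for_each_possible_cpu(cpu) {
 *		struct cpuidle_device *dev = &per_cpu(my_idle_dev, cpu);
 *
 *		dev->cpu = cpu;
 *		ret = cpuidle_register_device(dev);
 *		if (ret)
 *			break;
 *	}
 */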

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

	if (dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(cpu_dev);
	list_del(&dev->device_list);
	wait_for_completion(&dev->kobj_unregister);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;

	cpuidle_resume_and_unlock();

	module_put(cpuidle_driver->owner);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement. This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
		unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	if (cpuidle_disabled())
		return -ENODEV;

	ret = cpuidle_add_interface(cpu_subsys.dev_root);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

/* "cpuidle.off=1" on the kernel command line disables cpuidle entirely. */
module_param(off, int, 0444);
core_initcall(cpuidle_init);