Merge branches 'pnp', 'pm-cpuidle' and 'pm-cpufreq'

* pnp:
  PNP: Switch from __check_region() to __request_region()

* pm-cpuidle:
  cpuidle: powernv: Avoid endianness conversions while parsing DT
  cpuidle: powernv: Read target_residency value of idle states from DT if available

* pm-cpufreq:
  cpufreq: s3c: remove last use of resume_clocks callback
  cpufreq: s3c: remove incorrect __init annotations
Author: Rafael J. Wysocki
Date:   2015-02-21 04:29:16 +01:00
Commit: 3466b547e3

4 changed files with 56 additions and 48 deletions


@@ -263,7 +263,7 @@ out:
 }
 
 #ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
-static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
+static void s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
 {
 	int count, v, i, found;
 	struct cpufreq_frequency_table *pos;
@@ -333,7 +333,7 @@ static struct notifier_block s3c2416_cpufreq_reboot_notifier = {
 	.notifier_call = s3c2416_cpufreq_reboot_notifier_evt,
 };
 
-static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
+static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
 {
 	struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
 	struct cpufreq_frequency_table *pos;

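The __init annotations above had to go because a cpufreq driver's ->init()
callback is not boot-only code: it runs whenever a policy is created, for
example on CPU hotplug, long after the kernel has discarded its init sections.
A minimal sketch of the pattern, using hypothetical "demo" names rather than
the actual s3c code:

#include <linux/cpufreq.h>

static struct cpufreq_frequency_table demo_freq_table[] = {
	{ .frequency = 200000 },		/* kHz */
	{ .frequency = 400000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

/* Must not be __init: may run on CPU hotplug, after init memory is freed. */
static int demo_cpufreq_init(struct cpufreq_policy *policy)
{
	return cpufreq_table_validate_and_show(policy, demo_freq_table);
}

static struct cpufreq_driver demo_cpufreq_driver = {
	.name	= "demo",
	.init	= demo_cpufreq_init,
	.verify	= cpufreq_generic_frequency_table_verify,
	/* target/get callbacks omitted for brevity */
};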

@@ -144,11 +144,6 @@ static void s3c_cpufreq_setfvco(struct s3c_cpufreq_config *cfg)
 	(cfg->info->set_fvco)(cfg);
 }
 
-static inline void s3c_cpufreq_resume_clocks(void)
-{
-	cpu_cur.info->resume_clocks();
-}
-
 static inline void s3c_cpufreq_updateclk(struct clk *clk,
					  unsigned int freq)
 {
@@ -417,9 +412,6 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
 	last_target = ~0;	/* invalidate last_target setting */
 
-	/* first, find out what speed we resumed at. */
-	s3c_cpufreq_resume_clocks();
-
 	/* whilst we will be called later on, we try and re-set the
 	 * cpu frequencies as soon as possible so that we do not end
 	 * up resuming devices and then immediately having to re-set
@@ -454,7 +446,7 @@ static struct cpufreq_driver s3c24xx_driver = {
 };
 
-int __init s3c_cpufreq_register(struct s3c_cpufreq_info *info)
+int s3c_cpufreq_register(struct s3c_cpufreq_info *info)
 {
	if (!info || !info->name) {
		printk(KERN_ERR "%s: failed to pass valid information\n",

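With the resume_clocks() indirection removed, the resume path above just
invalidates its cached frequency (last_target = ~0) so the frequency is
re-established after suspend rather than assumed unchanged. The same idea,
stripped down to a sketch with hypothetical "demo" names:

#include <linux/cpufreq.h>

static unsigned int demo_last_target = ~0;	/* cached frequency, in kHz */

/* On resume, forget the cached value so the next set_target really runs. */
static int demo_cpufreq_resume(struct cpufreq_policy *policy)
{
	demo_last_target = ~0;
	return 0;
}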

@@ -13,6 +13,7 @@
 #include <linux/notifier.h>
 #include <linux/clockchips.h>
 #include <linux/of.h>
+#include <linux/slab.h>
 
 #include <asm/machdep.h>
 #include <asm/firmware.h>
@@ -158,70 +159,83 @@ static int powernv_add_idle_states(void)
 	struct device_node *power_mgt;
 	int nr_idle_states = 1; /* Snooze */
 	int dt_idle_states;
-	const __be32 *idle_state_flags;
-	const __be32 *idle_state_latency;
-	u32 len_flags, flags, latency_ns;
-	int i;
+	u32 *latency_ns, *residency_ns, *flags;
+	int i, rc;
 
 	/* Currently we have snooze statically defined */
 
 	power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
 	if (!power_mgt) {
 		pr_warn("opal: PowerMgmt Node not found\n");
-		return nr_idle_states;
+		goto out;
 	}
 
-	idle_state_flags = of_get_property(power_mgt, "ibm,cpu-idle-state-flags", &len_flags);
-	if (!idle_state_flags) {
-		pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-flags\n");
-		return nr_idle_states;
+	/* Read values of any property to determine the num of idle states */
+	dt_idle_states = of_property_count_u32_elems(power_mgt, "ibm,cpu-idle-state-flags");
+	if (dt_idle_states < 0) {
+		pr_warn("cpuidle-powernv: no idle states found in the DT\n");
+		goto out;
 	}
 
-	idle_state_latency = of_get_property(power_mgt,
-			"ibm,cpu-idle-state-latencies-ns", NULL);
-	if (!idle_state_latency) {
-		pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-latencies-ns\n");
-		return nr_idle_states;
+	flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL);
+	if (of_property_read_u32_array(power_mgt,
+			"ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
+		pr_warn("cpuidle-powernv : missing ibm,cpu-idle-state-flags in DT\n");
+		goto out_free_flags;
 	}
 
-	dt_idle_states = len_flags / sizeof(u32);
+	latency_ns = kzalloc(sizeof(*latency_ns) * dt_idle_states, GFP_KERNEL);
+	rc = of_property_read_u32_array(power_mgt,
+		"ibm,cpu-idle-state-latencies-ns", latency_ns, dt_idle_states);
+	if (rc) {
+		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
+		goto out_free_latency;
+	}
+	residency_ns = kzalloc(sizeof(*residency_ns) * dt_idle_states, GFP_KERNEL);
+	rc = of_property_read_u32_array(power_mgt,
+		"ibm,cpu-idle-state-residency-ns", residency_ns, dt_idle_states);
 
 	for (i = 0; i < dt_idle_states; i++) {
-		flags = be32_to_cpu(idle_state_flags[i]);
-		/* Cpuidle accepts exit_latency in us and we estimate
-		 * target residency to be 10x exit_latency
+		/*
+		 * Cpuidle accepts exit_latency and target_residency in us.
+		 * Use default target_residency values if f/w does not expose it.
 		 */
-		latency_ns = be32_to_cpu(idle_state_latency[i]);
-		if (flags & OPAL_PM_NAP_ENABLED) {
+		if (flags[i] & OPAL_PM_NAP_ENABLED) {
 			/* Add NAP state */
 			strcpy(powernv_states[nr_idle_states].name, "Nap");
 			strcpy(powernv_states[nr_idle_states].desc, "Nap");
 			powernv_states[nr_idle_states].flags = 0;
-			powernv_states[nr_idle_states].exit_latency =
-					((unsigned int)latency_ns) / 1000;
-			powernv_states[nr_idle_states].target_residency =
-					((unsigned int)latency_ns / 100);
+			powernv_states[nr_idle_states].target_residency = 100;
 			powernv_states[nr_idle_states].enter = &nap_loop;
-			nr_idle_states++;
-		}
-		if (flags & OPAL_PM_SLEEP_ENABLED ||
-			flags & OPAL_PM_SLEEP_ENABLED_ER1) {
+		} else if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
+			flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
 			/* Add FASTSLEEP state */
 			strcpy(powernv_states[nr_idle_states].name, "FastSleep");
 			strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
 			powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIMER_STOP;
-			powernv_states[nr_idle_states].exit_latency =
-					((unsigned int)latency_ns) / 1000;
-			powernv_states[nr_idle_states].target_residency =
-					((unsigned int)latency_ns / 100);
+			powernv_states[nr_idle_states].target_residency = 300000;
 			powernv_states[nr_idle_states].enter = &fastsleep_loop;
-			nr_idle_states++;
 		}
+		powernv_states[nr_idle_states].exit_latency =
+				((unsigned int)latency_ns[i]) / 1000;
+		if (!rc) {
+			powernv_states[nr_idle_states].target_residency =
+					((unsigned int)residency_ns[i]) / 1000;
+		}
+		nr_idle_states++;
 	}
 
+	kfree(residency_ns);
+out_free_latency:
+	kfree(latency_ns);
+out_free_flags:
+	kfree(flags);
+out:
 	return nr_idle_states;
 }

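The rework above drops of_get_property() plus manual be32_to_cpu() in favour
of the higher-level OF accessors, which hand back u32 values already in CPU
endianness and let the element count be queried up front. The pattern in
isolation, as a rough sketch (helper name and error handling are illustrative,
not taken from the patch):

#include <linux/of.h>
#include <linux/slab.h>

/* Read a u32 array property; returns the element count or a negative errno. */
static int demo_read_u32_prop(const struct device_node *np,
			      const char *prop, u32 **out)
{
	int n = of_property_count_u32_elems(np, prop);

	if (n < 0)
		return n;		/* property missing or malformed */

	*out = kcalloc(n, sizeof(**out), GFP_KERNEL);
	if (!*out)
		return -ENOMEM;

	if (of_property_read_u32_array(np, prop, *out, n)) {
		kfree(*out);
		*out = NULL;
		return -EINVAL;
	}

	return n;			/* values are native-endian */
}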

@@ -179,8 +179,9 @@ int pnp_check_port(struct pnp_dev *dev, struct resource *res)
 	/* check if the resource is already in use, skip if the
 	 * device is active because it itself may be in use */
 	if (!dev->active) {
-		if (__check_region(&ioport_resource, *port, length(port, end)))
+		if (!request_region(*port, length(port, end), "pnp"))
 			return 0;
+		release_region(*port, length(port, end));
 	}
 
 	/* check if the resource is reserved */
@@ -241,8 +242,9 @@ int pnp_check_mem(struct pnp_dev *dev, struct resource *res)
 	/* check if the resource is already in use, skip if the
 	 * device is active because it itself may be in use */
 	if (!dev->active) {
-		if (check_mem_region(*addr, length(addr, end)))
+		if (!request_mem_region(*addr, length(addr, end), "pnp"))
 			return 0;
+		release_mem_region(*addr, length(addr, end));
 	}
 
 	/* check if the resource is reserved */
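
The PNP hunks replace the old check-only helpers (__check_region() and
check_mem_region(), a check-then-act pattern that is deprecated and racy) with
a request-then-release probe: actually claiming the range is what tells us it
was free. The same idea as a standalone sketch (the "demo" helper is
illustrative only):

#include <linux/ioport.h>
#include <linux/types.h>

/* Returns true if the I/O port range [start, start + n) is currently free. */
static bool demo_ioport_range_free(resource_size_t start, resource_size_t n)
{
	if (!request_region(start, n, "demo-probe"))
		return false;		/* some other driver owns part of it */

	release_region(start, n);	/* we only wanted to probe */
	return true;
}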