Merge branch 'pm-cpufreq'
* pm-cpufreq: (37 commits)
  cpufreq: dt: allow driver to boot automatically
  intel_pstate: Fix overflow in busy_scaled due to long delay
  cpufreq: qoriq: optimize the CPU frequency switching time
  cpufreq: gx-suspmod: Fix two typos in two comments
  cpufreq: nforce2: Fix typo in comment to function nforce2_init()
  cpufreq: governor: Serialize governor callbacks
  cpufreq: governor: split cpufreq_governor_dbs()
  cpufreq: governor: register notifier from cs_init()
  cpufreq: Remove cpufreq_update_policy()
  cpufreq: Restart governor as soon as possible
  cpufreq: Call cpufreq_policy_put_kobj() from cpufreq_policy_free()
  cpufreq: Initialize policy->kobj while allocating policy
  cpufreq: Stop migrating sysfs files on hotplug
  cpufreq: Don't allow updating inactive policies from sysfs
  intel_pstate: Force setting target pstate when required
  intel_pstate: change some inconsistent debug information
  cpufreq: Track cpu managing sysfs kobjects separately
  cpufreq: Fix for typos in two comments
  cpufreq: Mark policy->governor = NULL for inactive policies
  cpufreq: Manage governor usage history with 'policy->last_governor'
  ...
commit 8ced6789da
@@ -196,8 +196,6 @@ affected_cpus : List of Online CPUs that require software
 related_cpus :			List of Online + Offline CPUs that need software
 				coordination of frequency.

 scaling_driver :		Hardware driver for cpufreq.

 scaling_cur_freq :		Current frequency of the CPU as determined by
 				the governor and cpufreq core, in KHz. This is
 				the frequency the kernel thinks the CPU runs
@@ -5,7 +5,7 @@

 # big LITTLE core layer and glue drivers
 config ARM_BIG_LITTLE_CPUFREQ
 	tristate "Generic ARM big LITTLE CPUfreq driver"
-	depends on ARM && BIG_LITTLE && ARM_CPU_TOPOLOGY && HAVE_CLK
+	depends on (ARM_CPU_TOPOLOGY || ARM64) && HAVE_CLK
 	select PM_OPP
 	help
 	  This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
@@ -31,7 +31,6 @@
 #include <linux/slab.h>
 #include <linux/topology.h>
 #include <linux/types.h>
-#include <asm/bL_switcher.h>

 #include "arm_big_little.h"

@@ -41,12 +40,16 @@
 #define MAX_CLUSTERS	2

 #ifdef CONFIG_BL_SWITCHER
+#include <asm/bL_switcher.h>
 static bool bL_switching_enabled;
 #define is_bL_switching_enabled()	bL_switching_enabled
 #define set_switching_enabled(x)	(bL_switching_enabled = (x))
 #else
 #define is_bL_switching_enabled()	false
 #define set_switching_enabled(x)	do { } while (0)
+#define bL_switch_request(...)		do { } while (0)
+#define bL_switcher_put_enabled()	do { } while (0)
+#define bL_switcher_get_enabled()	do { } while (0)
 #endif

 #define ACTUAL_FREQ(cluster, freq)  ((cluster == A7_CLUSTER) ? freq << 1 : freq)
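Context for the ACTUAL_FREQ() macro at the end of this hunk: with the big.LITTLE switcher active, the driver publishes a merged virtual frequency table in which the A7 cluster's advertised rate is half its real clock, so ACTUAL_FREQ() doubles it back. A self-contained sketch of that arithmetic (A7_CLUSTER and the VIRT_FREQ counterpart come from elsewhere in this driver; the sample rate is invented):

```c
/* Standalone illustration of the virtual/actual frequency mapping
 * used by arm_big_little when the bL switcher is enabled. */
#include <stdio.h>

#define A7_CLUSTER 1
#define ACTUAL_FREQ(cluster, freq) ((cluster) == A7_CLUSTER ? (freq) << 1 : (freq))
#define VIRT_FREQ(cluster, freq)   ((cluster) == A7_CLUSTER ? (freq) >> 1 : (freq))

int main(void)
{
	unsigned int virt = 500000; /* 500 MHz as seen by cpufreq, in kHz */

	/* On the A7 cluster the clock really runs at twice the
	 * advertised virtual rate. */
	printf("A7 actual: %u kHz\n", ACTUAL_FREQ(A7_CLUSTER, virt));
	printf("A7 virt:   %u kHz\n", VIRT_FREQ(A7_CLUSTER, 1000000));
	return 0;
}
```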
@@ -186,6 +189,15 @@ bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
 		mutex_unlock(&cluster_lock[old_cluster]);
 	}

+	/*
+	 * FIXME: clk_set_rate has to handle the case where clk_change_rate
+	 * can fail due to hardware or firmware issues. Until the clk core
+	 * layer is fixed, we can check here. In most of the cases we will
+	 * be reading only the cached value anyway. This needs to be removed
+	 * once clk core is fixed.
+	 */
+	if (bL_cpufreq_get_rate(cpu) != new_rate)
+		return -EIO;
 	return 0;
 }
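The added block works around clk_set_rate() reporting success even when the rate change failed in hardware or firmware: read the rate back and return -EIO on mismatch. A user-space sketch of the same set-then-verify pattern (all names here are illustrative stand-ins, not kernel API):

```c
/* Illustrative set-then-verify pattern, mirroring the FIXME above:
 * apply a setting, read it back, and report an I/O error on mismatch. */
#include <errno.h>

static unsigned long current_rate;	/* stands in for the clock hardware */

static int clk_set(unsigned long rate)       { current_rate = rate; return 0; }
static unsigned long clk_read_back(void)     { return current_rate; }

static int set_rate_checked(unsigned long rate)
{
	int ret = clk_set(rate);

	if (ret)
		return ret;

	/* The set call may "succeed" while the hardware rejects the
	 * change, so trust only the value read back. */
	if (clk_read_back() != rate)
		return -EIO;

	return 0;
}

int main(void)
{
	return set_rate_checked(1000000) ? 1 : 0;
}
```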
@@ -322,7 +334,6 @@ static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
 static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
 {
 	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
-	char name[14] = "cpu-cluster.";
 	int ret;

 	if (freq_table[cluster])
@@ -342,8 +353,7 @@ static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
 		goto free_opp_table;
 	}

-	name[12] = cluster + '0';
-	clk[cluster] = clk_get(cpu_dev, name);
+	clk[cluster] = clk_get(cpu_dev, NULL);
 	if (!IS_ERR(clk[cluster])) {
 		dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
 			__func__, clk[cluster], freq_table[cluster],
@@ -506,6 +516,7 @@ static struct cpufreq_driver bL_cpufreq_driver = {
 	.attr			= cpufreq_generic_attr,
 };

+#ifdef CONFIG_BL_SWITCHER
 static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
 					unsigned long action, void *_arg)
 {
@@ -538,6 +549,20 @@ static struct notifier_block bL_switcher_notifier = {
 	.notifier_call = bL_cpufreq_switcher_notifier,
 };

+static int __bLs_register_notifier(void)
+{
+	return bL_switcher_register_notifier(&bL_switcher_notifier);
+}
+
+static int __bLs_unregister_notifier(void)
+{
+	return bL_switcher_unregister_notifier(&bL_switcher_notifier);
+}
+#else
+static int __bLs_register_notifier(void) { return 0; }
+static int __bLs_unregister_notifier(void) { return 0; }
+#endif
+
 int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
 {
 	int ret, i;
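The #else branch is the standard kernel stub idiom: with CONFIG_BL_SWITCHER off, the wrappers collapse to no-ops returning 0, so the register/unregister paths need no conditional compilation at their call sites. A tiny self-contained sketch of the idiom (CONFIG_MY_FEATURE and my_feature_register() are made-up names):

```c
#include <stdio.h>

/* #define CONFIG_MY_FEATURE 1 */	/* toggle to compare both builds */

#ifdef CONFIG_MY_FEATURE
static int my_feature_register(void)
{
	printf("feature registered\n");
	return 0;
}
#else
/* Stub: keeps call sites free of #ifdefs and compiles to nothing. */
static inline int my_feature_register(void) { return 0; }
#endif

int main(void)
{
	return my_feature_register();
}
```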
@@ -555,8 +580,7 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)

 	arm_bL_ops = ops;

-	ret = bL_switcher_get_enabled();
-	set_switching_enabled(ret);
+	set_switching_enabled(bL_switcher_get_enabled());

 	for (i = 0; i < MAX_CLUSTERS; i++)
 		mutex_init(&cluster_lock[i]);
@@ -567,7 +591,7 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
 			__func__, ops->name, ret);
 		arm_bL_ops = NULL;
 	} else {
-		ret = bL_switcher_register_notifier(&bL_switcher_notifier);
+		ret = __bLs_register_notifier();
 		if (ret) {
 			cpufreq_unregister_driver(&bL_cpufreq_driver);
 			arm_bL_ops = NULL;
@@ -591,7 +615,7 @@ void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
 	}

 	bL_switcher_get_enabled();
-	bL_switcher_unregister_notifier(&bL_switcher_notifier);
+	__bLs_unregister_notifier();
 	cpufreq_unregister_driver(&bL_cpufreq_driver);
 	bL_switcher_put_enabled();
 	pr_info("%s: Un-registered platform driver: %s\n", __func__,
@@ -416,6 +416,7 @@ static struct platform_driver dt_cpufreq_platdrv = {
 };
 module_platform_driver(dt_cpufreq_platdrv);

+MODULE_ALIAS("platform:cpufreq-dt");
 MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
 MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
 MODULE_DESCRIPTION("Generic cpufreq driver");
@@ -414,7 +414,7 @@ static int nforce2_detect_chipset(void)
 * nforce2_init - initializes the nForce2 CPUFreq driver
 *
 * Initializes the nForce2 FSB support. Returns -ENODEV on unsupported
- * devices, -EINVAL on problems during initiatization, and zero on
+ * devices, -EINVAL on problems during initialization, and zero on
 * success.
 */
 static int __init nforce2_init(void)
@@ -31,10 +31,62 @@
 #include <linux/tick.h>
 #include <trace/events/power.h>

-/* Macros to iterate over lists */
-/* Iterate over online CPUs policies */
 static LIST_HEAD(cpufreq_policy_list);
-#define for_each_policy(__policy)			\
-	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
+
+static inline bool policy_is_inactive(struct cpufreq_policy *policy)
+{
+	return cpumask_empty(policy->cpus);
+}
+
+static bool suitable_policy(struct cpufreq_policy *policy, bool active)
+{
+	return active == !policy_is_inactive(policy);
+}
+
+/* Finds Next Acive/Inactive policy */
+static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
+					  bool active)
+{
+	do {
+		policy = list_next_entry(policy, policy_list);
+
+		/* No more policies in the list */
+		if (&policy->policy_list == &cpufreq_policy_list)
+			return NULL;
+	} while (!suitable_policy(policy, active));
+
+	return policy;
+}
+
+static struct cpufreq_policy *first_policy(bool active)
+{
+	struct cpufreq_policy *policy;
+
+	/* No policies in the list */
+	if (list_empty(&cpufreq_policy_list))
+		return NULL;
+
+	policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
+				  policy_list);
+
+	if (!suitable_policy(policy, active))
+		policy = next_policy(policy, active);
+
+	return policy;
+}
+
+/* Macros to iterate over CPU policies */
+#define for_each_suitable_policy(__policy, __active)	\
+	for (__policy = first_policy(__active);		\
+	     __policy;					\
+	     __policy = next_policy(__policy, __active))
+
+#define for_each_active_policy(__policy)		\
+	for_each_suitable_policy(__policy, true)
+#define for_each_inactive_policy(__policy)		\
+	for_each_suitable_policy(__policy, false)
+
+#define for_each_policy(__policy)			\
+	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

 /* Iterate over governors */
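With these helpers, callers pick the policy set they care about without open-coding the "any CPU online?" test; later hunks in this diff switch suspend/resume and boost over to for_each_active_policy(). A self-contained analogue of the first_policy()/next_policy() filtering (names and data are illustrative; the kernel version walks cpufreq_policy_list):

```c
/* Iterate a list, yielding only entries matching an "active" predicate,
 * the same shape as for_each_active_policy()/for_each_inactive_policy(). */
#include <stdbool.h>
#include <stdio.h>

struct policy { const char *name; bool active; };

static struct policy list[] = {
	{ "policy0", true }, { "policy1", false }, { "policy2", true },
};
#define N (sizeof(list) / sizeof(list[0]))

static struct policy *next_suitable(struct policy *p, bool active)
{
	for (p = p ? p + 1 : list; p < list + N; p++)
		if (p->active == active)
			return p;
	return NULL;
}

#define for_each_suitable(p, a) \
	for (p = next_suitable(NULL, a); p; p = next_suitable(p, a))

int main(void)
{
	struct policy *p;

	for_each_suitable(p, true)	/* like for_each_active_policy() */
		printf("active:   %s\n", p->name);
	for_each_suitable(p, false)	/* like for_each_inactive_policy() */
		printf("inactive: %s\n", p->name);
	return 0;
}
```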
@@ -49,13 +101,9 @@ static LIST_HEAD(cpufreq_governor_list);
 */
 static struct cpufreq_driver *cpufreq_driver;
 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
-static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
 static DEFINE_RWLOCK(cpufreq_driver_lock);
 DEFINE_MUTEX(cpufreq_governor_lock);

-/* This one keeps track of the previously set governor of a removed CPU */
-static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
-
 /* Flag to suspend/resume CPUFreq governors */
 static bool cpufreq_suspended;
@@ -178,7 +226,7 @@ int cpufreq_generic_init(struct cpufreq_policy *policy,
 	policy->cpuinfo.transition_latency = transition_latency;

 	/*
-	 * The driver only supports the SMP configuartion where all processors
+	 * The driver only supports the SMP configuration where all processors
 	 * share the clock and voltage and clock.
 	 */
 	cpumask_setall(policy->cpus);
@@ -187,10 +235,18 @@
 }
 EXPORT_SYMBOL_GPL(cpufreq_generic_init);

-unsigned int cpufreq_generic_get(unsigned int cpu)
+/* Only for cpufreq core internal use */
+struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
 {
 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

+	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
+}
+
+unsigned int cpufreq_generic_get(unsigned int cpu)
+{
+	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+
 	if (!policy || IS_ERR(policy->clk)) {
 		pr_err("%s: No %s associated to cpu: %d\n",
 		       __func__, policy ? "clk" : "policy", cpu);
@@ -201,18 +257,29 @@ unsigned int cpufreq_generic_get(unsigned int cpu)
 }
 EXPORT_SYMBOL_GPL(cpufreq_generic_get);

-/* Only for cpufreq core internal use */
-struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
-{
-	return per_cpu(cpufreq_cpu_data, cpu);
-}
-
+/**
+ * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
+ *
+ * @cpu: cpu to find policy for.
+ *
+ * This returns policy for 'cpu', returns NULL if it doesn't exist.
+ * It also increments the kobject reference count to mark it busy and so would
+ * require a corresponding call to cpufreq_cpu_put() to decrement it back.
+ * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
+ * freed as that depends on the kobj count.
+ *
+ * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
+ * valid policy is found. This is done to make sure the driver doesn't get
+ * unregistered while the policy is being used.
+ *
+ * Return: A valid policy on success, otherwise NULL on failure.
+ */
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 {
 	struct cpufreq_policy *policy = NULL;
 	unsigned long flags;

-	if (cpu >= nr_cpu_ids)
+	if (WARN_ON(cpu >= nr_cpu_ids))
 		return NULL;

 	if (!down_read_trylock(&cpufreq_rwsem))
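The new kerneldoc spells out the contract: every successful cpufreq_cpu_get() must be balanced by a cpufreq_cpu_put(), since the policy's lifetime hangs off the kobject refcount and the cpufreq_rwsem read-lock is held in between. A usage sketch of that pairing (the wrapper function is illustrative; the two calls are the real API documented above):

```c
/* Hedged sketch of the documented get/put contract. */
static unsigned int read_cur_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int freq;

	if (!policy)
		return 0;	/* no driver or no policy for this CPU */

	freq = policy->cur;

	/* Drops the kobject reference and the cpufreq_rwsem read-lock
	 * taken by cpufreq_cpu_get(); without this the policy can
	 * never be freed. */
	cpufreq_cpu_put(policy);
	return freq;
}
```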
@@ -223,7 +290,7 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)

 	if (cpufreq_driver) {
 		/* get the CPU */
-		policy = per_cpu(cpufreq_cpu_data, cpu);
+		policy = cpufreq_cpu_get_raw(cpu);
 		if (policy)
 			kobject_get(&policy->kobj);
 	}
@@ -237,6 +304,16 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 }
 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

+/**
+ * cpufreq_cpu_put: Decrements the usage count of a policy
+ *
+ * @policy: policy earlier returned by cpufreq_cpu_get().
+ *
+ * This decrements the kobject reference count incremented earlier by calling
+ * cpufreq_cpu_get().
+ *
+ * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
+ */
 void cpufreq_cpu_put(struct cpufreq_policy *policy)
 {
 	kobject_put(&policy->kobj);
@@ -798,11 +875,18 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,

 	down_write(&policy->rwsem);

+	/* Updating inactive policies is invalid, so avoid doing that. */
+	if (unlikely(policy_is_inactive(policy))) {
+		ret = -EBUSY;
+		goto unlock_policy_rwsem;
+	}
+
 	if (fattr->store)
 		ret = fattr->store(policy, buf, count);
 	else
 		ret = -EIO;

+unlock_policy_rwsem:
 	up_write(&policy->rwsem);

 	up_read(&cpufreq_rwsem);
@@ -873,28 +957,67 @@ void cpufreq_sysfs_remove_file(const struct attribute *attr)
 }
 EXPORT_SYMBOL(cpufreq_sysfs_remove_file);

-/* symlink affected CPUs */
+static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
+{
+	struct device *cpu_dev;
+
+	pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);
+
+	if (!policy)
+		return 0;
+
+	cpu_dev = get_cpu_device(cpu);
+	if (WARN_ON(!cpu_dev))
+		return 0;
+
+	return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
+}
+
+static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
+{
+	struct device *cpu_dev;
+
+	pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);
+
+	cpu_dev = get_cpu_device(cpu);
+	if (WARN_ON(!cpu_dev))
+		return;
+
+	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+}
+
+/* Add/remove symlinks for all related CPUs */
 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
 {
 	unsigned int j;
 	int ret = 0;

-	for_each_cpu(j, policy->cpus) {
-		struct device *cpu_dev;
-
-		if (j == policy->cpu)
+	/* Some related CPUs might not be present (physically hotplugged) */
+	for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
+		if (j == policy->kobj_cpu)
 			continue;

-		pr_debug("Adding link for CPU: %u\n", j);
-		cpu_dev = get_cpu_device(j);
-		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
-					"cpufreq");
+		ret = add_cpu_dev_symlink(policy, j);
 		if (ret)
 			break;
 	}

 	return ret;
 }

+static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
+{
+	unsigned int j;
+
+	/* Some related CPUs might not be present (physically hotplugged) */
+	for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
+		if (j == policy->kobj_cpu)
+			continue;
+
+		remove_cpu_dev_symlink(policy, j);
+	}
+}
+
 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
 				     struct device *dev)
 {
@@ -937,7 +1060,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
 	memcpy(&new_policy, policy, sizeof(*policy));

 	/* Update governor of new_policy to the governor used before hotplug */
-	gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
+	gov = find_governor(policy->last_governor);
 	if (gov)
 		pr_debug("Restoring governor %s for cpu %d\n",
 			 policy->governor->name, policy->cpu);
@@ -963,7 +1086,10 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
 				  unsigned int cpu, struct device *dev)
 {
 	int ret = 0;
-	unsigned long flags;
+
+	/* Has this CPU been taken care of already? */
+	if (cpumask_test_cpu(cpu, policy->cpus))
+		return 0;

 	if (has_target()) {
 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
@@ -974,13 +1100,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
 	}

 	down_write(&policy->rwsem);
-
-	write_lock_irqsave(&cpufreq_driver_lock, flags);
-
 	cpumask_set_cpu(cpu, policy->cpus);
-	per_cpu(cpufreq_cpu_data, cpu) = policy;
-	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
 	up_write(&policy->rwsem);

 	if (has_target()) {
@@ -994,7 +1114,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
 		}
 	}

-	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
+	return 0;
 }

 static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
@@ -1003,20 +1123,25 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
 	unsigned long flags;

 	read_lock_irqsave(&cpufreq_driver_lock, flags);
-
-	policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
-
+	policy = per_cpu(cpufreq_cpu_data, cpu);
 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

-	if (policy)
-		policy->governor = NULL;
+	if (likely(policy)) {
+		/* Policy should be inactive here */
+		WARN_ON(!policy_is_inactive(policy));
+
+		down_write(&policy->rwsem);
+		policy->cpu = cpu;
+		up_write(&policy->rwsem);
+	}

 	return policy;
 }

-static struct cpufreq_policy *cpufreq_policy_alloc(void)
+static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
 {
 	struct cpufreq_policy *policy;
+	int ret;

 	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
 	if (!policy)
@@ -1028,6 +1153,13 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
 	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
 		goto err_free_cpumask;

+	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
+				   "cpufreq");
+	if (ret) {
+		pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
+		goto err_free_rcpumask;
+	}
+
 	INIT_LIST_HEAD(&policy->policy_list);
 	init_rwsem(&policy->rwsem);
 	spin_lock_init(&policy->transition_lock);
@@ -1035,8 +1167,15 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
 	init_completion(&policy->kobj_unregister);
 	INIT_WORK(&policy->update, handle_update);

+	policy->cpu = dev->id;
+
+	/* Set this once on allocation */
+	policy->kobj_cpu = dev->id;
+
 	return policy;

+err_free_rcpumask:
+	free_cpumask_var(policy->related_cpus);
 err_free_cpumask:
 	free_cpumask_var(policy->cpus);
 err_free_policy:
@@ -1045,18 +1184,20 @@ err_free_policy:
 	return NULL;
 }

-static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
+static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
 {
 	struct kobject *kobj;
 	struct completion *cmp;

-	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
-			CPUFREQ_REMOVE_POLICY, policy);
+	if (notify)
+		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+					     CPUFREQ_REMOVE_POLICY, policy);

-	down_read(&policy->rwsem);
+	down_write(&policy->rwsem);
+	cpufreq_remove_dev_symlink(policy);
 	kobj = &policy->kobj;
 	cmp = &policy->kobj_unregister;
-	up_read(&policy->rwsem);
+	up_write(&policy->rwsem);
 	kobject_put(kobj);

 	/*
@@ -1069,68 +1210,64 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
 	pr_debug("wait complete\n");
 }

-static void cpufreq_policy_free(struct cpufreq_policy *policy)
+static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
 {
+	unsigned long flags;
+	int cpu;
+
+	/* Remove policy from list */
+	write_lock_irqsave(&cpufreq_driver_lock, flags);
+	list_del(&policy->policy_list);
+
+	for_each_cpu(cpu, policy->related_cpus)
+		per_cpu(cpufreq_cpu_data, cpu) = NULL;
+	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+	cpufreq_policy_put_kobj(policy, notify);
 	free_cpumask_var(policy->related_cpus);
 	free_cpumask_var(policy->cpus);
 	kfree(policy);
 }

-static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
-			     struct device *cpu_dev)
-{
-	int ret;
-
-	if (WARN_ON(cpu == policy->cpu))
-		return 0;
-
-	/* Move kobject to the new policy->cpu */
-	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
-	if (ret) {
-		pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
-		return ret;
-	}
-
-	down_write(&policy->rwsem);
-	policy->cpu = cpu;
-	up_write(&policy->rwsem);
-
-	return 0;
-}
-
-static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+/**
+ * cpufreq_add_dev - add a CPU device
+ *
+ * Adds the cpufreq interface for a CPU device.
+ *
+ * The Oracle says: try running cpufreq registration/unregistration concurrently
+ * with with cpu hotplugging and all hell will break loose. Tried to clean this
+ * mess up, but more thorough testing is needed. - Mathieu
+ */
+static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
 	unsigned int j, cpu = dev->id;
 	int ret = -ENOMEM;
 	struct cpufreq_policy *policy;
 	unsigned long flags;
-	bool recover_policy = cpufreq_suspended;
-
-	if (cpu_is_offline(cpu))
-		return 0;
+	bool recover_policy = !sif;

 	pr_debug("adding CPU %u\n", cpu);

-	/* check whether a different CPU already registered this
-	 * CPU because it is in the same boat. */
-	policy = cpufreq_cpu_get_raw(cpu);
-	if (unlikely(policy))
-		return 0;
+	/*
+	 * Only possible if 'cpu' wasn't physically present earlier and we are
+	 * here from subsys_interface add callback. A hotplug notifier will
+	 * follow and we will handle it like logical CPU hotplug then. For now,
+	 * just create the sysfs link.
+	 */
+	if (cpu_is_offline(cpu))
+		return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu);

 	if (!down_read_trylock(&cpufreq_rwsem))
 		return 0;

-	/* Check if this cpu was hot-unplugged earlier and has siblings */
-	read_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_policy(policy) {
-		if (cpumask_test_cpu(cpu, policy->related_cpus)) {
-			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-			ret = cpufreq_add_policy_cpu(policy, cpu, dev);
-			up_read(&cpufreq_rwsem);
-			return ret;
-		}
+	/* Check if this CPU already has a policy to manage it */
+	policy = per_cpu(cpufreq_cpu_data, cpu);
+	if (policy && !policy_is_inactive(policy)) {
+		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
+		ret = cpufreq_add_policy_cpu(policy, cpu, dev);
+		up_read(&cpufreq_rwsem);
+		return ret;
 	}
-	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

 	/*
 	 * Restore the saved policy when doing light-weight init and fall back
@@ -1139,22 +1276,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 	policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
 	if (!policy) {
 		recover_policy = false;
-		policy = cpufreq_policy_alloc();
+		policy = cpufreq_policy_alloc(dev);
 		if (!policy)
 			goto nomem_out;
 	}

-	/*
-	 * In the resume path, since we restore a saved policy, the assignment
-	 * to policy->cpu is like an update of the existing policy, rather than
-	 * the creation of a brand new one. So we need to perform this update
-	 * by invoking update_policy_cpu().
-	 */
-	if (recover_policy && cpu != policy->cpu)
-		WARN_ON(update_policy_cpu(policy, cpu, dev));
-	else
-		policy->cpu = cpu;
-
 	cpumask_copy(policy->cpus, cpumask_of(cpu));

 	/* call driver. From then on the cpufreq must be able
@@ -1181,21 +1307,12 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 		policy->user_policy.min = policy->min;
 		policy->user_policy.max = policy->max;

-		/* prepare interface data */
-		ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
-					   &dev->kobj, "cpufreq");
-		if (ret) {
-			pr_err("%s: failed to init policy->kobj: %d\n",
-			       __func__, ret);
-			goto err_init_policy_kobj;
-		}
+		write_lock_irqsave(&cpufreq_driver_lock, flags);
+		for_each_cpu(j, policy->related_cpus)
+			per_cpu(cpufreq_cpu_data, j) = policy;
+		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 	}

-	write_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_cpu(j, policy->cpus)
-		per_cpu(cpufreq_cpu_data, j) = policy;
-	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
 		policy->cur = cpufreq_driver->get(policy->cpu);
 		if (!policy->cur) {
@@ -1253,11 +1370,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 			goto err_out_unregister;
 		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 				CPUFREQ_CREATE_POLICY, policy);
-	}

-	write_lock_irqsave(&cpufreq_driver_lock, flags);
-	list_add(&policy->policy_list, &cpufreq_policy_list);
-	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+		write_lock_irqsave(&cpufreq_driver_lock, flags);
+		list_add(&policy->policy_list, &cpufreq_policy_list);
+		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	}

 	cpufreq_init_policy(policy);
@@ -1281,68 +1398,28 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)

 err_out_unregister:
 err_get_freq:
-	write_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_cpu(j, policy->cpus)
-		per_cpu(cpufreq_cpu_data, j) = NULL;
-	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
-	if (!recover_policy) {
-		kobject_put(&policy->kobj);
-		wait_for_completion(&policy->kobj_unregister);
-	}
-err_init_policy_kobj:
 	up_write(&policy->rwsem);

 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(policy);
 err_set_policy_cpu:
-	if (recover_policy) {
-		/* Do not leave stale fallback data behind. */
-		per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
-		cpufreq_policy_put_kobj(policy);
-	}
-	cpufreq_policy_free(policy);
-
+	cpufreq_policy_free(policy, recover_policy);
 nomem_out:
 	up_read(&cpufreq_rwsem);

 	return ret;
 }

-/**
- * cpufreq_add_dev - add a CPU device
- *
- * Adds the cpufreq interface for a CPU device.
- *
- * The Oracle says: try running cpufreq registration/unregistration concurrently
- * with with cpu hotplugging and all hell will break loose. Tried to clean this
- * mess up, but more thorough testing is needed. - Mathieu
- */
-static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
-{
-	return __cpufreq_add_dev(dev, sif);
-}
-
 static int __cpufreq_remove_dev_prepare(struct device *dev,
 					struct subsys_interface *sif)
 {
-	unsigned int cpu = dev->id, cpus;
-	int ret;
-	unsigned long flags;
+	unsigned int cpu = dev->id;
+	int ret = 0;
 	struct cpufreq_policy *policy;

 	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

-	write_lock_irqsave(&cpufreq_driver_lock, flags);
-
-	policy = per_cpu(cpufreq_cpu_data, cpu);
-
-	/* Save the policy somewhere when doing a light-weight tear-down */
-	if (cpufreq_suspended)
-		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
-
-	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
+	policy = cpufreq_cpu_get_raw(cpu);
 	if (!policy) {
 		pr_debug("%s: No cpu_data found\n", __func__);
 		return -EINVAL;
@@ -1354,108 +1431,75 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 			pr_err("%s: Failed to stop governor\n", __func__);
 			return ret;
 		}
-
-		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
-			policy->governor->name, CPUFREQ_NAME_LEN);
 	}

-	down_read(&policy->rwsem);
-	cpus = cpumask_weight(policy->cpus);
-	up_read(&policy->rwsem);
+	down_write(&policy->rwsem);
+	cpumask_clear_cpu(cpu, policy->cpus);

-	if (cpu != policy->cpu) {
-		sysfs_remove_link(&dev->kobj, "cpufreq");
-	} else if (cpus > 1) {
+	if (policy_is_inactive(policy)) {
+		if (has_target())
+			strncpy(policy->last_governor, policy->governor->name,
+				CPUFREQ_NAME_LEN);
+	} else if (cpu == policy->cpu) {
 		/* Nominate new CPU */
-		int new_cpu = cpumask_any_but(policy->cpus, cpu);
-		struct device *cpu_dev = get_cpu_device(new_cpu);
+		policy->cpu = cpumask_any(policy->cpus);
+	}
+	up_write(&policy->rwsem);

-		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
-		ret = update_policy_cpu(policy, new_cpu, cpu_dev);
-		if (ret) {
-			if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
-					      "cpufreq"))
-				pr_err("%s: Failed to restore kobj link to cpu:%d\n",
-				       __func__, cpu_dev->id);
-			return ret;
+	/* Start governor again for active policy */
+	if (!policy_is_inactive(policy)) {
+		if (has_target()) {
+			ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+			if (!ret)
+				ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+			if (ret)
+				pr_err("%s: Failed to start governor\n", __func__);
 		}
-
-		if (!cpufreq_suspended)
-			pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
-				 __func__, new_cpu, cpu);
 	} else if (cpufreq_driver->stop_cpu) {
 		cpufreq_driver->stop_cpu(policy);
 	}

-	return 0;
+	return ret;
 }

 static int __cpufreq_remove_dev_finish(struct device *dev,
 					struct subsys_interface *sif)
 {
-	unsigned int cpu = dev->id, cpus;
+	unsigned int cpu = dev->id;
 	int ret;
-	unsigned long flags;
-	struct cpufreq_policy *policy;
-
-	write_lock_irqsave(&cpufreq_driver_lock, flags);
-	policy = per_cpu(cpufreq_cpu_data, cpu);
-	per_cpu(cpufreq_cpu_data, cpu) = NULL;
-	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

 	if (!policy) {
 		pr_debug("%s: No cpu_data found\n", __func__);
 		return -EINVAL;
 	}

-	down_write(&policy->rwsem);
-	cpus = cpumask_weight(policy->cpus);
-
-	if (cpus > 1)
-		cpumask_clear_cpu(cpu, policy->cpus);
-	up_write(&policy->rwsem);
+	/* Only proceed for inactive policies */
+	if (!policy_is_inactive(policy))
+		return 0;

-	/* If cpu is last user of policy, free policy */
-	if (cpus == 1) {
-		if (has_target()) {
-			ret = __cpufreq_governor(policy,
-					CPUFREQ_GOV_POLICY_EXIT);
-			if (ret) {
-				pr_err("%s: Failed to exit governor\n",
-				       __func__);
-				return ret;
-			}
-		}
-
-		if (!cpufreq_suspended)
-			cpufreq_policy_put_kobj(policy);
-
-		/*
-		 * Perform the ->exit() even during light-weight tear-down,
-		 * since this is a core component, and is essential for the
-		 * subsequent light-weight ->init() to succeed.
-		 */
-		if (cpufreq_driver->exit)
-			cpufreq_driver->exit(policy);
-
-		/* Remove policy from list of active policies */
-		write_lock_irqsave(&cpufreq_driver_lock, flags);
-		list_del(&policy->policy_list);
-		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
-		if (!cpufreq_suspended)
-			cpufreq_policy_free(policy);
-	} else if (has_target()) {
-		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
-		if (!ret)
-			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
-
+	if (has_target()) {
+		ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
 		if (ret) {
-			pr_err("%s: Failed to start governor\n", __func__);
+			pr_err("%s: Failed to exit governor\n", __func__);
 			return ret;
 		}
 	}

+	/*
+	 * Perform the ->exit() even during light-weight tear-down,
+	 * since this is a core component, and is essential for the
+	 * subsequent light-weight ->init() to succeed.
+	 */
+	if (cpufreq_driver->exit)
+		cpufreq_driver->exit(policy);
+
+	/* Free the policy only if the driver is getting removed. */
+	if (sif)
+		cpufreq_policy_free(policy, true);
+
 	return 0;
 }
@@ -1469,8 +1513,33 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 	unsigned int cpu = dev->id;
 	int ret;

-	if (cpu_is_offline(cpu))
+	/*
+	 * Only possible if 'cpu' is getting physically removed now. A hotplug
+	 * notifier should have already been called and we just need to remove
+	 * link or free policy here.
+	 */
+	if (cpu_is_offline(cpu)) {
+		struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+		struct cpumask mask;
+
+		if (!policy)
+			return 0;
+
+		cpumask_copy(&mask, policy->related_cpus);
+		cpumask_clear_cpu(cpu, &mask);
+
+		/*
+		 * Free policy only if all policy->related_cpus are removed
+		 * physically.
+		 */
+		if (cpumask_intersects(&mask, cpu_present_mask)) {
+			remove_cpu_dev_symlink(policy, cpu);
+			return 0;
+		}
+
+		cpufreq_policy_free(policy, true);
 		return 0;
+	}

 	ret = __cpufreq_remove_dev_prepare(dev, sif);
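The subsys remove path now frees the policy only once the last CPU in policy->related_cpus has been physically removed; until then it just drops the departing CPU's symlink. The test reduces to: copy related_cpus, clear the departing CPU, and check whether anything left is still present. A standalone bitmask illustration of that logic (plain integers stand in for cpumasks; the values are invented):

```c
#include <stdio.h>

int main(void)
{
	unsigned int related = 0x0c;	/* policy->related_cpus: CPUs 2,3 */
	unsigned int present = 0x04;	/* cpu_present_mask: only CPU 2 left */
	unsigned int cpu = 2;		/* CPU being physically removed */

	unsigned int mask = related & ~(1u << cpu);

	if (mask & present)
		printf("other related CPUs still present: keep policy\n");
	else
		printf("last related CPU removed: free policy\n");
	return 0;
}
```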
@@ -1567,6 +1636,10 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)

 	ret_freq = cpufreq_driver->get(policy->cpu);

+	/* Updating inactive policies is invalid, so avoid doing that. */
+	if (unlikely(policy_is_inactive(policy)))
+		return ret_freq;
+
 	if (ret_freq && policy->cur &&
 	    !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
 		/* verify no discrepancy between actual and
@@ -1656,7 +1729,7 @@ void cpufreq_suspend(void)

 	pr_debug("%s: Suspending Governors\n", __func__);

-	for_each_policy(policy) {
+	for_each_active_policy(policy) {
 		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
 			pr_err("%s: Failed to stop governor for policy: %p\n",
 				__func__, policy);
@@ -1690,7 +1763,7 @@ void cpufreq_resume(void)

 	pr_debug("%s: Resuming Governors\n", __func__);

-	for_each_policy(policy) {
+	for_each_active_policy(policy) {
 		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
 			pr_err("%s: Failed to resume driver: %p\n", __func__,
 				policy);
@@ -1891,7 +1964,7 @@ static int __target_index(struct cpufreq_policy *policy,
 	 * Failed after setting to intermediate freq? Driver should have
 	 * reverted back to initial frequency and so should we. Check
 	 * here for intermediate_freq instead of get_intermediate, in
-	 * case we have't switched to intermediate freq at all.
+	 * case we haven't switched to intermediate freq at all.
 	 */
 	if (unlikely(retval && intermediate_freq)) {
 		freqs.old = intermediate_freq;
@@ -2092,7 +2165,8 @@ EXPORT_SYMBOL_GPL(cpufreq_register_governor);

 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
 {
-	int cpu;
+	struct cpufreq_policy *policy;
+	unsigned long flags;

 	if (!governor)
 		return;
@@ -2100,12 +2174,15 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
 	if (cpufreq_disabled())
 		return;

-	for_each_present_cpu(cpu) {
-		if (cpu_online(cpu))
-			continue;
-		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
-			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
+	/* clear last_governor for all inactive policies */
+	read_lock_irqsave(&cpufreq_driver_lock, flags);
+	for_each_inactive_policy(policy) {
+		if (!strcmp(policy->last_governor, governor->name)) {
+			policy->governor = NULL;
+			strcpy(policy->last_governor, "\0");
+		}
 	}
+	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

 	mutex_lock(&cpufreq_governor_mutex);
 	list_del(&governor->governor_list);
@@ -2304,7 +2381,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
 	if (dev) {
 		switch (action & ~CPU_TASKS_FROZEN) {
 		case CPU_ONLINE:
-			__cpufreq_add_dev(dev, NULL);
+			cpufreq_add_dev(dev, NULL);
 			break;

 		case CPU_DOWN_PREPARE:
@@ -2316,7 +2393,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
 			break;

 		case CPU_DOWN_FAILED:
-			__cpufreq_add_dev(dev, NULL);
+			cpufreq_add_dev(dev, NULL);
 			break;
 		}
 	}
@@ -2336,7 +2413,7 @@ static int cpufreq_boost_set_sw(int state)
 	struct cpufreq_policy *policy;
 	int ret = -EINVAL;

-	for_each_policy(policy) {
+	for_each_active_policy(policy) {
 		freq_table = cpufreq_frequency_get_table(policy->cpu);
 		if (freq_table) {
 			ret = cpufreq_frequency_table_cpuinfo(policy,
@@ -148,6 +148,10 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 	return 0;
 }

+static struct notifier_block cs_cpufreq_notifier_block = {
+	.notifier_call = dbs_cpufreq_notifier,
+};
+
 /************************** sysfs interface ************************/
 static struct common_dbs_data cs_dbs_cdata;
@@ -317,7 +321,7 @@ static struct attribute_group cs_attr_group_gov_pol = {

 /************************** sysfs end ************************/

-static int cs_init(struct dbs_data *dbs_data)
+static int cs_init(struct dbs_data *dbs_data, bool notify)
 {
 	struct cs_dbs_tuners *tuners;
@@ -336,25 +340,25 @@ static int cs_init(struct dbs_data *dbs_data)
 	dbs_data->tuners = tuners;
 	dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
 		jiffies_to_usecs(10);
-	mutex_init(&dbs_data->mutex);
+
+	if (notify)
+		cpufreq_register_notifier(&cs_cpufreq_notifier_block,
+					  CPUFREQ_TRANSITION_NOTIFIER);
+
 	return 0;
 }

-static void cs_exit(struct dbs_data *dbs_data)
+static void cs_exit(struct dbs_data *dbs_data, bool notify)
 {
+	if (notify)
+		cpufreq_unregister_notifier(&cs_cpufreq_notifier_block,
+					    CPUFREQ_TRANSITION_NOTIFIER);
+
 	kfree(dbs_data->tuners);
 }

 define_get_cpu_dbs_routines(cs_cpu_dbs_info);

-static struct notifier_block cs_cpufreq_notifier_block = {
-	.notifier_call = dbs_cpufreq_notifier,
-};
-
-static struct cs_ops cs_ops = {
-	.notifier_block = &cs_cpufreq_notifier_block,
-};
-
 static struct common_dbs_data cs_dbs_cdata = {
 	.governor = GOV_CONSERVATIVE,
 	.attr_group_gov_sys = &cs_attr_group_gov_sys,
@@ -363,9 +367,9 @@ static struct common_dbs_data cs_dbs_cdata = {
 	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
 	.gov_dbs_timer = cs_dbs_timer,
 	.gov_check_cpu = cs_check_cpu,
-	.gov_ops = &cs_ops,
 	.init = cs_init,
 	.exit = cs_exit,
+	.mutex = __MUTEX_INITIALIZER(cs_dbs_cdata.mutex),
 };

 static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
@@ -239,211 +239,242 @@ static void set_sampling_rate(struct dbs_data *dbs_data,
 	}
 }

+static int cpufreq_governor_init(struct cpufreq_policy *policy,
+				 struct dbs_data *dbs_data,
+				 struct common_dbs_data *cdata)
+{
+	unsigned int latency;
+	int ret;
+
+	if (dbs_data) {
+		if (WARN_ON(have_governor_per_policy()))
+			return -EINVAL;
+		dbs_data->usage_count++;
+		policy->governor_data = dbs_data;
+		return 0;
+	}
+
+	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
+	if (!dbs_data)
+		return -ENOMEM;
+
+	dbs_data->cdata = cdata;
+	dbs_data->usage_count = 1;
+
+	ret = cdata->init(dbs_data, !policy->governor->initialized);
+	if (ret)
+		goto free_dbs_data;
+
+	/* policy latency is in ns. Convert it to us first */
+	latency = policy->cpuinfo.transition_latency / 1000;
+	if (latency == 0)
+		latency = 1;
+
+	/* Bring kernel and HW constraints together */
+	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
+					  MIN_LATENCY_MULTIPLIER * latency);
+	set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
+					latency * LATENCY_MULTIPLIER));
+
+	if (!have_governor_per_policy()) {
+		if (WARN_ON(cpufreq_get_global_kobject())) {
+			ret = -EINVAL;
+			goto cdata_exit;
+		}
+		cdata->gdbs_data = dbs_data;
+	}
+
+	ret = sysfs_create_group(get_governor_parent_kobj(policy),
+				 get_sysfs_attr(dbs_data));
+	if (ret)
+		goto put_kobj;
+
+	policy->governor_data = dbs_data;
+
+	return 0;
+
+put_kobj:
+	if (!have_governor_per_policy()) {
+		cdata->gdbs_data = NULL;
+		cpufreq_put_global_kobject();
+	}
+cdata_exit:
+	cdata->exit(dbs_data, !policy->governor->initialized);
+free_dbs_data:
+	kfree(dbs_data);
+	return ret;
+}
+
+static void cpufreq_governor_exit(struct cpufreq_policy *policy,
+				  struct dbs_data *dbs_data)
+{
+	struct common_dbs_data *cdata = dbs_data->cdata;
+
+	policy->governor_data = NULL;
+	if (!--dbs_data->usage_count) {
+		sysfs_remove_group(get_governor_parent_kobj(policy),
+				   get_sysfs_attr(dbs_data));
+
+		if (!have_governor_per_policy()) {
+			cdata->gdbs_data = NULL;
+			cpufreq_put_global_kobject();
+		}
+
+		cdata->exit(dbs_data, policy->governor->initialized == 1);
+		kfree(dbs_data);
+	}
+}
+
+static int cpufreq_governor_start(struct cpufreq_policy *policy,
+				  struct dbs_data *dbs_data)
+{
+	struct common_dbs_data *cdata = dbs_data->cdata;
+	unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
+	struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);
+	int io_busy = 0;
+
+	if (!policy->cur)
+		return -EINVAL;
+
+	if (cdata->governor == GOV_CONSERVATIVE) {
+		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+
+		sampling_rate = cs_tuners->sampling_rate;
+		ignore_nice = cs_tuners->ignore_nice_load;
+	} else {
+		struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+
+		sampling_rate = od_tuners->sampling_rate;
+		ignore_nice = od_tuners->ignore_nice_load;
+		io_busy = od_tuners->io_is_busy;
+	}
+
+	for_each_cpu(j, policy->cpus) {
+		struct cpu_dbs_common_info *j_cdbs = cdata->get_cpu_cdbs(j);
+		unsigned int prev_load;
+
+		j_cdbs->cpu = j;
+		j_cdbs->cur_policy = policy;
+		j_cdbs->prev_cpu_idle =
+			get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);
+
+		prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
+					   j_cdbs->prev_cpu_idle);
+		j_cdbs->prev_load = 100 * prev_load /
+				    (unsigned int)j_cdbs->prev_cpu_wall;
+
+		if (ignore_nice)
+			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+
+		mutex_init(&j_cdbs->timer_mutex);
+		INIT_DEFERRABLE_WORK(&j_cdbs->work, cdata->gov_dbs_timer);
+	}
+
+	if (cdata->governor == GOV_CONSERVATIVE) {
+		struct cs_cpu_dbs_info_s *cs_dbs_info =
+			cdata->get_cpu_dbs_info_s(cpu);
+
+		cs_dbs_info->down_skip = 0;
+		cs_dbs_info->enable = 1;
+		cs_dbs_info->requested_freq = policy->cur;
+	} else {
+		struct od_ops *od_ops = cdata->gov_ops;
+		struct od_cpu_dbs_info_s *od_dbs_info = cdata->get_cpu_dbs_info_s(cpu);
+
+		od_dbs_info->rate_mult = 1;
+		od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
+		od_ops->powersave_bias_init_cpu(cpu);
+	}
+
+	/* Initiate timer time stamp */
+	cpu_cdbs->time_stamp = ktime_get();
+
+	gov_queue_work(dbs_data, policy, delay_for_sampling_rate(sampling_rate),
+		       true);
+	return 0;
+}
+
+static void cpufreq_governor_stop(struct cpufreq_policy *policy,
+				  struct dbs_data *dbs_data)
+{
+	struct common_dbs_data *cdata = dbs_data->cdata;
+	unsigned int cpu = policy->cpu;
+	struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);
+
+	if (cdata->governor == GOV_CONSERVATIVE) {
+		struct cs_cpu_dbs_info_s *cs_dbs_info =
+			cdata->get_cpu_dbs_info_s(cpu);
+
+		cs_dbs_info->enable = 0;
+	}
+
+	gov_cancel_work(dbs_data, policy);
+
+	mutex_destroy(&cpu_cdbs->timer_mutex);
+	cpu_cdbs->cur_policy = NULL;
+}
+
+static void cpufreq_governor_limits(struct cpufreq_policy *policy,
+				    struct dbs_data *dbs_data)
+{
+	struct common_dbs_data *cdata = dbs_data->cdata;
+	unsigned int cpu = policy->cpu;
+	struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);
+
+	if (!cpu_cdbs->cur_policy)
+		return;
+
+	mutex_lock(&cpu_cdbs->timer_mutex);
+	if (policy->max < cpu_cdbs->cur_policy->cur)
+		__cpufreq_driver_target(cpu_cdbs->cur_policy, policy->max,
+					CPUFREQ_RELATION_H);
+	else if (policy->min > cpu_cdbs->cur_policy->cur)
+		__cpufreq_driver_target(cpu_cdbs->cur_policy, policy->min,
+					CPUFREQ_RELATION_L);
+	dbs_check_cpu(dbs_data, cpu);
+	mutex_unlock(&cpu_cdbs->timer_mutex);
+}
+
 int cpufreq_governor_dbs(struct cpufreq_policy *policy,
-		struct common_dbs_data *cdata, unsigned int event)
+			 struct common_dbs_data *cdata, unsigned int event)
 {
 	struct dbs_data *dbs_data;
-	struct od_cpu_dbs_info_s *od_dbs_info = NULL;
-	struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
-	struct od_ops *od_ops = NULL;
-	struct od_dbs_tuners *od_tuners = NULL;
-	struct cs_dbs_tuners *cs_tuners = NULL;
-	struct cpu_dbs_common_info *cpu_cdbs;
-	unsigned int sampling_rate, latency, ignore_nice, j, cpu = policy->cpu;
-	int io_busy = 0;
-	int rc;
+	int ret = 0;
+
+	/* Lock governor to block concurrent initialization of governor */
+	mutex_lock(&cdata->mutex);

 	if (have_governor_per_policy())
 		dbs_data = policy->governor_data;
 	else
 		dbs_data = cdata->gdbs_data;

-	WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT));
+	if (WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT))) {
+		ret = -EINVAL;
+		goto unlock;
+	}

 	switch (event) {
 	case CPUFREQ_GOV_POLICY_INIT:
-		if (have_governor_per_policy()) {
-			WARN_ON(dbs_data);
-		} else if (dbs_data) {
-			dbs_data->usage_count++;
-			policy->governor_data = dbs_data;
-			return 0;
-		}
-
-		dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
-		if (!dbs_data) {
-			pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
-			return -ENOMEM;
-		}
-
-		dbs_data->cdata = cdata;
-		dbs_data->usage_count = 1;
-		rc = cdata->init(dbs_data);
-		if (rc) {
-			pr_err("%s: POLICY_INIT: init() failed\n", __func__);
-			kfree(dbs_data);
-			return rc;
-		}
-
-		if (!have_governor_per_policy())
-			WARN_ON(cpufreq_get_global_kobject());
-
-		rc = sysfs_create_group(get_governor_parent_kobj(policy),
-				get_sysfs_attr(dbs_data));
-		if (rc) {
-			cdata->exit(dbs_data);
-			kfree(dbs_data);
-			return rc;
-		}
-
-		policy->governor_data = dbs_data;
-
-		/* policy latency is in ns. Convert it to us first */
-		latency = policy->cpuinfo.transition_latency / 1000;
-		if (latency == 0)
-			latency = 1;
-
-		/* Bring kernel and HW constraints together */
-		dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
-				MIN_LATENCY_MULTIPLIER * latency);
-		set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
-					latency * LATENCY_MULTIPLIER));
-
-		if ((cdata->governor == GOV_CONSERVATIVE) &&
-				(!policy->governor->initialized)) {
-			struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
-
-			cpufreq_register_notifier(cs_ops->notifier_block,
-					CPUFREQ_TRANSITION_NOTIFIER);
-		}
-
-		if (!have_governor_per_policy())
-			cdata->gdbs_data = dbs_data;
-
-		return 0;
+		ret = cpufreq_governor_init(policy, dbs_data, cdata);
+		break;
+
 	case CPUFREQ_GOV_POLICY_EXIT:
-		if (!--dbs_data->usage_count) {
-			sysfs_remove_group(get_governor_parent_kobj(policy),
-					get_sysfs_attr(dbs_data));
-
-			if (!have_governor_per_policy())
-				cpufreq_put_global_kobject();
-
-			if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
-				(policy->governor->initialized == 1)) {
-				struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
-
-				cpufreq_unregister_notifier(cs_ops->notifier_block,
-						CPUFREQ_TRANSITION_NOTIFIER);
-			}
-
-			cdata->exit(dbs_data);
-			kfree(dbs_data);
-			cdata->gdbs_data = NULL;
-		}
-
-		policy->governor_data = NULL;
-		return 0;
-	}
-
-	cpu_cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
-
-	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
-		cs_tuners = dbs_data->tuners;
-		cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
-		sampling_rate = cs_tuners->sampling_rate;
-		ignore_nice = cs_tuners->ignore_nice_load;
-	} else {
-		od_tuners = dbs_data->tuners;
-		od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
-		sampling_rate = od_tuners->sampling_rate;
-		ignore_nice = od_tuners->ignore_nice_load;
-		od_ops = dbs_data->cdata->gov_ops;
-		io_busy = od_tuners->io_is_busy;
-	}
-
-	switch (event) {
+		cpufreq_governor_exit(policy, dbs_data);
+		break;
+
 	case CPUFREQ_GOV_START:
-		if (!policy->cur)
-			return -EINVAL;
-
-		mutex_lock(&dbs_data->mutex);
-
-		for_each_cpu(j, policy->cpus) {
-			struct cpu_dbs_common_info *j_cdbs =
-				dbs_data->cdata->get_cpu_cdbs(j);
-			unsigned int prev_load;
-
-			j_cdbs->cpu = j;
-			j_cdbs->cur_policy = policy;
-			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
-					       &j_cdbs->prev_cpu_wall, io_busy);
-
-			prev_load = (unsigned int)
-				(j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle);
-			j_cdbs->prev_load = 100 * prev_load /
-					(unsigned int) j_cdbs->prev_cpu_wall;
-
-			if (ignore_nice)
-				j_cdbs->prev_cpu_nice =
-					kcpustat_cpu(j).cpustat[CPUTIME_NICE];
-
-			mutex_init(&j_cdbs->timer_mutex);
-			INIT_DEFERRABLE_WORK(&j_cdbs->work,
-					     dbs_data->cdata->gov_dbs_timer);
-		}
-
-		if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
-			cs_dbs_info->down_skip = 0;
-			cs_dbs_info->enable = 1;
-			cs_dbs_info->requested_freq = policy->cur;
-		} else {
-			od_dbs_info->rate_mult = 1;
-			od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
-			od_ops->powersave_bias_init_cpu(cpu);
-		}
-
-		mutex_unlock(&dbs_data->mutex);
-
-		/* Initiate timer time stamp */
-		cpu_cdbs->time_stamp = ktime_get();
-
-		gov_queue_work(dbs_data, policy,
-				delay_for_sampling_rate(sampling_rate), true);
+		ret = cpufreq_governor_start(policy, dbs_data);
 		break;

 	case CPUFREQ_GOV_STOP:
-		if (dbs_data->cdata->governor == GOV_CONSERVATIVE)
-			cs_dbs_info->enable = 0;
-
-		gov_cancel_work(dbs_data, policy);
-
-		mutex_lock(&dbs_data->mutex);
-		mutex_destroy(&cpu_cdbs->timer_mutex);
-		cpu_cdbs->cur_policy = NULL;
-
-		mutex_unlock(&dbs_data->mutex);
-
+		cpufreq_governor_stop(policy, dbs_data);
 		break;

 	case CPUFREQ_GOV_LIMITS:
-		mutex_lock(&dbs_data->mutex);
-		if (!cpu_cdbs->cur_policy) {
-			mutex_unlock(&dbs_data->mutex);
-			break;
-		}
-		mutex_lock(&cpu_cdbs->timer_mutex);
-		if (policy->max < cpu_cdbs->cur_policy->cur)
-			__cpufreq_driver_target(cpu_cdbs->cur_policy,
-					policy->max, CPUFREQ_RELATION_H);
-		else if (policy->min > cpu_cdbs->cur_policy->cur)
-			__cpufreq_driver_target(cpu_cdbs->cur_policy,
-					policy->min, CPUFREQ_RELATION_L);
-		dbs_check_cpu(dbs_data, cpu);
-		mutex_unlock(&cpu_cdbs->timer_mutex);
-		mutex_unlock(&dbs_data->mutex);
+		cpufreq_governor_limits(policy, dbs_data);
 		break;
 	}
-	return 0;
+
+unlock:
+	mutex_unlock(&cdata->mutex);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
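After the split, cpufreq_governor_dbs() is a thin dispatcher: it resolves dbs_data, holds cdata->mutex across the whole callback (the "Serialize governor callbacks" commit), and routes each event to one helper. A self-contained sketch of that serialize-and-dispatch shape (pthread mutex and printf stand in for the kernel mutex and the real helpers):

```c
#include <pthread.h>
#include <stdio.h>

enum gov_event { GOV_INIT, GOV_EXIT, GOV_START, GOV_STOP, GOV_LIMITS };

static pthread_mutex_t gov_mutex = PTHREAD_MUTEX_INITIALIZER;

static int handle_init(void)    { printf("init\n");   return 0; }
static void handle_exit(void)   { printf("exit\n");   }
static int handle_start(void)   { printf("start\n");  return 0; }
static void handle_stop(void)   { printf("stop\n");   }
static void handle_limits(void) { printf("limits\n"); }

static int governor_event(enum gov_event event)
{
	int ret = 0;

	/* Block concurrent governor callbacks, like cdata->mutex above. */
	pthread_mutex_lock(&gov_mutex);

	switch (event) {
	case GOV_INIT:   ret = handle_init();  break;
	case GOV_EXIT:   handle_exit();        break;
	case GOV_START:  ret = handle_start(); break;
	case GOV_STOP:   handle_stop();        break;
	case GOV_LIMITS: handle_limits();      break;
	}

	pthread_mutex_unlock(&gov_mutex);
	return ret;
}

int main(void)
{
	governor_event(GOV_INIT);
	governor_event(GOV_START);
	governor_event(GOV_STOP);
	governor_event(GOV_EXIT);
	return 0;
}
```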
@@ -208,11 +208,16 @@ struct common_dbs_data {
 	void *(*get_cpu_dbs_info_s)(int cpu);
 	void (*gov_dbs_timer)(struct work_struct *work);
 	void (*gov_check_cpu)(int cpu, unsigned int load);
-	int (*init)(struct dbs_data *dbs_data);
-	void (*exit)(struct dbs_data *dbs_data);
+	int (*init)(struct dbs_data *dbs_data, bool notify);
+	void (*exit)(struct dbs_data *dbs_data, bool notify);

 	/* Governor specific ops, see below */
 	void *gov_ops;
+
+	/*
+	 * Protects governor's data (struct dbs_data and struct common_dbs_data)
+	 */
+	struct mutex mutex;
 };

 /* Governor Per policy data */
@@ -221,9 +226,6 @@ struct dbs_data {
 	unsigned int min_sampling_rate;
 	int usage_count;
 	void *tuners;
-
-	/* dbs_mutex protects dbs_enable in governor start/stop */
-	struct mutex mutex;
 };

 /* Governor specific ops, will be passed to dbs_data->gov_ops */
/* Governor specific ops, will be passed to dbs_data->gov_ops */
|
||||
@ -234,10 +236,6 @@ struct od_ops {
|
||||
void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
|
||||
};
|
||||
|
||||
struct cs_ops {
|
||||
struct notifier_block *notifier_block;
|
||||
};
|
||||
|
||||
static inline int delay_for_sampling_rate(unsigned int sampling_rate)
|
||||
{
|
||||
int delay = usecs_to_jiffies(sampling_rate);
|
||||
|
@@ -475,7 +475,7 @@ static struct attribute_group od_attr_group_gov_pol = {

 /************************** sysfs end ************************/

-static int od_init(struct dbs_data *dbs_data)
+static int od_init(struct dbs_data *dbs_data, bool notify)
 {
 	struct od_dbs_tuners *tuners;
 	u64 idle_time;
@ -513,11 +513,10 @@ static int od_init(struct dbs_data *dbs_data)
|
||||
tuners->io_is_busy = should_io_be_busy();
|
||||
|
||||
dbs_data->tuners = tuners;
|
||||
mutex_init(&dbs_data->mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void od_exit(struct dbs_data *dbs_data)
|
||||
static void od_exit(struct dbs_data *dbs_data, bool notify)
|
||||
{
|
||||
kfree(dbs_data->tuners);
|
||||
}
|
||||
@ -541,6 +540,7 @@ static struct common_dbs_data od_dbs_cdata = {
|
||||
.gov_ops = &od_ops,
|
||||
.init = od_init,
|
||||
.exit = od_exit,
|
||||
.mutex = __MUTEX_INITIALIZER(od_dbs_cdata.mutex),
|
||||
};
|
||||
|
||||
static void od_set_powersave_bias(unsigned int powersave_bias)
|
||||
|
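
Note the .mutex set up with __MUTEX_INITIALIZER() while od_init() loses its mutex_init() call: a statically initialized mutex in common_dbs_data is valid from boot, so governor callbacks can be serialized without depending on init() having run first. A minimal sketch of the pattern, with illustrative names:

/*
 * Hedged sketch of the serialization pattern: the mutex lives in the
 * statically initialized common_dbs_data. 'my_cdata' and
 * 'my_governor_event' are illustrative names.
 */
#include <linux/mutex.h>

static struct common_dbs_data my_cdata = {
	/* ... .init, .exit and the other ops as above ... */
	.mutex = __MUTEX_INITIALIZER(my_cdata.mutex),
};

static int my_governor_event(void)
{
	mutex_lock(&my_cdata.mutex);	/* valid even if init() never ran */
	/* ... handle START/STOP/LIMITS ... */
	mutex_unlock(&my_cdata.mutex);
	return 0;
}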
@ -144,7 +144,7 @@ module_param(max_duration, int, 0444);


/**
 * we can detect a core multipiler from dir0_lsb
 * we can detect a core multiplier from dir0_lsb
 * from GX1 datasheet p.56,
 *	MULT[3:0]:
 *	0000 = SYSCLK multiplied by 4 (test only)
@ -346,7 +346,7 @@ static int cpufreq_gx_verify(struct cpufreq_policy *policy)

	/* it needs to be assured that at least one supported frequency is
	 * within policy->min and policy->max. If it is not, policy->max
	 * needs to be increased until one freuqency is supported.
	 * needs to be increased until one frequency is supported.
	 * policy->min may not be decreased, though. This way we guarantee a
	 * specific processing capacity.
	 */
@ -48,9 +48,9 @@ static inline int32_t mul_fp(int32_t x, int32_t y)
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(int32_t x, int32_t y)
static inline int32_t div_fp(s64 x, s64 y)
{
	return div_s64((int64_t)x << FRAC_BITS, y);
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
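
The widening of div_fp() to s64/div64_s64() is easiest to see with the constants spelled out. A hedged user-space rendering of the same 8.8 fixed-point helpers, assuming FRAC_BITS = 8 as in this file:

/*
 * Hedged user-space sketch. With the old int32_t signature the
 * shifted dividend overflowed once the operand exceeded about 2^23;
 * 64-bit operands keep it in range.
 */
#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static int32_t div_fp(int64_t x, int64_t y)
{
	return (int32_t)((x << FRAC_BITS) / y);	/* mirrors the div64_s64() use */
}

int main(void)
{
	/* 1/2 in 8.8 fixed point is 0.5 * 256 = 128 */
	printf("%d\n", div_fp(int_tofp(1), int_tofp(2)));
	return 0;
}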
@ -68,6 +68,7 @@ struct sample {
	int32_t core_pct_busy;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	int freq;
	ktime_t time;
};
@ -109,6 +110,7 @@ struct cpudata {
	ktime_t last_sample_time;
	u64 prev_aperf;
	u64 prev_mperf;
	u64 prev_tsc;
	struct sample sample;
};

@ -396,7 +398,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,

	update_turbo_state();
	if (limits.turbo_disabled) {
		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
		pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
		return -EPERM;
	}

@ -484,7 +486,7 @@ static void __init intel_pstate_sysfs_expose_params(void)
 static void intel_pstate_hwp_enable(void)
{
	hwp_active++;
	pr_info("intel_pstate HWP enabled\n");
	pr_info("intel_pstate: HWP enabled\n");

	wrmsrl( MSR_PM_ENABLE, 0x1);
}
@ -535,7 +537,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)

	val |= vid;

	wrmsrl(MSR_IA32_PERF_CTL, val);
	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}

#define BYT_BCLK_FREQS	5
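
byt_set_pstate() can be reached from a CPU other than the one whose P-state is being programmed (init and stop paths run elsewhere), and plain wrmsrl() always writes the local CPU's MSR. A minimal sketch of the distinction (set_perf_ctl is an illustrative wrapper, not a driver function):

/*
 * Hedged sketch: wrmsrl() targets whichever CPU the caller runs on,
 * while wrmsrl_on_cpu() executes the write on the named CPU (via
 * smp_call_function_single() internally).
 */
#include <asm/msr.h>

static void set_perf_ctl(unsigned int target_cpu, u64 val)
{
	/* wrong when not running on target_cpu: */
	/* wrmsrl(MSR_IA32_PERF_CTL, val); */

	/* correct from any CPU: */
	wrmsrl_on_cpu(target_cpu, MSR_IA32_PERF_CTL, val);
}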
@ -704,19 +706,20 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
{
	int max_perf, min_perf;

	update_turbo_state();
	if (force) {
		update_turbo_state();

	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
		intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

		pstate = clamp_t(int, pstate, min_perf, max_perf);

		if (pstate == cpu->pstate.current_pstate)
			return;
	pstate = clamp_t(int, pstate, min_perf, max_perf);

	if (pstate == cpu->pstate.current_pstate)
		return;
	}
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);

	cpu->pstate.current_pstate = pstate;
@ -733,7 +736,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}

static inline void intel_pstate_calc_busy(struct cpudata *cpu)
@ -756,23 +759,28 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = native_read_tsc();
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = ktime_get();
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc = tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	intel_pstate_calc_busy(cpu);

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
}

static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
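
The TSC snapshot added here rides along with the existing APERF/MPERF pair so the delta can be exported through the tracepoint changes below. For reference, the busy figure those deltas feed is essentially the APERF/MPERF ratio; a hedged sketch in the file's own 8.8 fixed point (calc_core_busy is an illustrative name, the driver's helper is intel_pstate_calc_busy()):

/*
 * Hedged sketch: APERF advances only in C0 at the actual frequency,
 * MPERF advances in C0 at the TSC frequency, so 100 * aperf/mperf
 * approximates busyness relative to the guaranteed frequency.
 */
static inline int32_t calc_core_busy(u64 aperf_delta, u64 mperf_delta)
{
	if (!mperf_delta)		/* avoid divide-by-zero */
		return 0;

	/* 100 * aperf/mperf in 8.8 fixed point */
	return div_fp(int_tofp(100) * aperf_delta, int_tofp(mperf_delta));
}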
@ -794,7 +802,7 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
 static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
	u32 duration_us;
	s64 duration_us;
	u32 sample_time;

	/*
@ -821,8 +829,8 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
	 * to adjust our busyness.
	 */
	sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
	duration_us = (u32) ktime_us_delta(cpu->sample.time,
					   cpu->last_sample_time);
	duration_us = ktime_us_delta(cpu->sample.time,
				     cpu->last_sample_time);
	if (duration_us > sample_time * 3) {
		sample_ratio = div_fp(int_tofp(sample_time),
				      int_tofp(duration_us));
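
The widened types close a concrete overflow window, which is what the "Fix overflow in busy_scaled due to long delay" commit in this merge is about. A short worked check of the limits involved, using FRAC_BITS = 8 from this file:

	int_tofp(x) = x << 8
	largest x that fits in 32 bits afterwards: (2^31 - 1) >> 8 ~= 8.39e6
	8.39e6 us ~= 8.4 seconds

So int_tofp(duration_us) wrapped once a CPU had been idle for more than roughly 8.4 seconds, corrupting sample_ratio and busy_scaled; the old (u32) cast on ktime_us_delta() had a similar limit at 2^32 us (about 71 minutes). Keeping duration_us as s64 and dividing via div64_s64() stays exact for any realistic delay.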
@ -837,6 +845,10 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
	int32_t busy_scaled;
	struct _pid *pid;
	signed int ctl;
	int from;
	struct sample *sample;

	from = cpu->pstate.current_pstate;

	pid = &cpu->pid;
	busy_scaled = intel_pstate_get_scaled_busy(cpu);
@ -844,7 +856,17 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
	ctl = pid_calc(pid, busy_scaled);

	/* Negative values of ctl increase the pstate and vice versa */
	intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl);
	intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl, true);

	sample = &cpu->sample;
	trace_pstate_sample(fp_toint(sample->core_pct_busy),
		fp_toint(busy_scaled),
		from,
		cpu->pstate.current_pstate,
		sample->mperf,
		sample->aperf,
		sample->tsc,
		sample->freq);
}

static void intel_hwp_timer_func(unsigned long __data)
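
For context on the ctl sign convention: busy_scaled is compared against the PID setpoint, and a busier-than-target CPU must yield a negative ctl so that current_pstate - ctl goes up. A hedged toy sketch with the proportional term only (the real pid_calc() also applies integral and derivative gains; all names and gains here are illustrative):

/*
 * Hedged toy sketch of the sign convention, proportional term only.
 */
#include <stdint.h>

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

struct toy_pid {
	int32_t setpoint;	/* target busy_scaled, e.g. int_tofp(97) */
	int32_t kp;		/* proportional gain, 8.8 fixed point */
};

static int32_t toy_pid_calc(struct toy_pid *pid, int32_t busy_fp)
{
	int64_t err = (int64_t)pid->setpoint - busy_fp;

	/* busier than target -> err < 0 -> negative ctl */
	return fp_toint(((int64_t)pid->kp * err) >> FRAC_BITS);
}

/* caller mirrors the driver: pstate = pstate - toy_pid_calc(...) */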
@ -858,21 +880,11 @@ static void intel_hwp_timer_func(unsigned long __data)
 static void intel_pstate_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;
	struct sample *sample;

	intel_pstate_sample(cpu);

	sample = &cpu->sample;

	intel_pstate_adjust_busy_pstate(cpu);

	trace_pstate_sample(fp_toint(sample->core_pct_busy),
			fp_toint(intel_pstate_get_scaled_busy(cpu)),
			cpu->pstate.current_pstate,
			sample->mperf,
			sample->aperf,
			sample->freq);

	intel_pstate_set_sample_time(cpu);
}
@ -935,7 +947,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)

	add_timer_on(&cpu->timer, cpunum);

	pr_debug("Intel pstate controlling: cpu %d\n", cpunum);
	pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);

	return 0;
}
@ -1001,13 +1013,13 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
	int cpu_num = policy->cpu;
	struct cpudata *cpu = all_cpu_data[cpu_num];

	pr_info("intel_pstate CPU %d exiting\n", cpu_num);
	pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);

	del_timer_sync(&all_cpu_data[cpu_num]->timer);
	if (hwp_active)
		return;

	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
@ -56,7 +56,7 @@ module_param(pxa27x_maxfreq, uint, 0);
MODULE_PARM_DESC(pxa27x_maxfreq, "Set the pxa27x maxfreq in MHz"
		 "(typically 624=>pxa270, 416=>pxa271, 520=>pxa272)");

typedef struct {
struct pxa_freqs {
	unsigned int khz;
	unsigned int membus;
	unsigned int cccr;
@ -64,7 +64,7 @@ typedef struct {
	unsigned int cclkcfg;
	int vmin;
	int vmax;
} pxa_freqs_t;
};

/* Define the refresh period in mSec for the SDRAM and the number of rows */
#define SDRAM_TREF	64	/* standard 64ms SDRAM */
@ -86,7 +86,7 @@ static unsigned int sdram_rows;
/* Use the run mode frequencies for the CPUFREQ_POLICY_PERFORMANCE policy */
#define CCLKCFG			CCLKCFG_TURBO | CCLKCFG_FCS

static pxa_freqs_t pxa255_run_freqs[] =
static const struct pxa_freqs pxa255_run_freqs[] =
{
	/* CPU   MEMBUS  CCCR  DIV2 CCLKCFG  run  turbo PXbus SDRAM */
	{ 99500,  99500, 0x121, 1,  CCLKCFG, -1, -1},	/*  99,  99,  50,  50  */
@ -98,7 +98,7 @@ static pxa_freqs_t pxa255_run_freqs[] =
};

/* Use the turbo mode frequencies for the CPUFREQ_POLICY_POWERSAVE policy */
static pxa_freqs_t pxa255_turbo_freqs[] =
static const struct pxa_freqs pxa255_turbo_freqs[] =
{
	/* CPU   MEMBUS  CCCR  DIV2 CCLKCFG  run  turbo PXbus SDRAM */
	{ 99500, 99500, 0x121, 1,  CCLKCFG, -1, -1},	/*  99,  99,  50,  50  */
@ -153,7 +153,7 @@ MODULE_PARM_DESC(pxa255_turbo_table, "Selects the frequency table (0 = run table
	  ((HT) ? CCLKCFG_HALFTURBO : 0) | \
	  ((T) ? CCLKCFG_TURBO : 0))

static pxa_freqs_t pxa27x_freqs[] = {
static struct pxa_freqs pxa27x_freqs[] = {
	{104000, 104000, PXA27x_CCCR(1,  8, 2), 0, CCLKCFG2(1, 0, 1),  900000, 1705000 },
	{156000, 104000, PXA27x_CCCR(1,  8, 3), 0, CCLKCFG2(1, 0, 1), 1000000, 1705000 },
	{208000, 208000, PXA27x_CCCR(0, 16, 2), 1, CCLKCFG2(0, 0, 1), 1180000, 1705000 },
@ -171,7 +171,7 @@ extern unsigned get_clk_frequency_khz(int info);

#ifdef CONFIG_REGULATOR

static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
{
	int ret = 0;
	int vmin, vmax;
@ -202,7 +202,7 @@ static void __init pxa_cpufreq_init_voltages(void)
	}
}
#else
static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
static int pxa_cpufreq_change_voltage(struct pxa_freqs *pxa_freq)
{
	return 0;
}
@ -211,7 +211,7 @@ static void __init pxa_cpufreq_init_voltages(void) { }
#endif

static void find_freq_tables(struct cpufreq_frequency_table **freq_table,
			     pxa_freqs_t **pxa_freqs)
			     const struct pxa_freqs **pxa_freqs)
{
	if (cpu_is_pxa25x()) {
		if (!pxa255_turbo_table) {
@ -270,7 +270,7 @@ static unsigned int pxa_cpufreq_get(unsigned int cpu)
 static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx)
{
	struct cpufreq_frequency_table *pxa_freqs_table;
	pxa_freqs_t *pxa_freq_settings;
	const struct pxa_freqs *pxa_freq_settings;
	unsigned long flags;
	unsigned int new_freq_cpu, new_freq_mem;
	unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg;
@ -361,7 +361,7 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
	int i;
	unsigned int freq;
	struct cpufreq_frequency_table *pxa255_freq_table;
	pxa_freqs_t *pxa255_freqs;
	const struct pxa_freqs *pxa255_freqs;

	/* try to guess pxa27x cpu */
	if (cpu_is_pxa27x())
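
Two style points drive the pxa hunks above: kernel style discourages typedefs for plain structs, and adding const lets the read-only frequency tables be placed in .rodata. A minimal sketch of the resulting pattern (the entry values are illustrative, not real PXA settings):

/*
 * Hedged sketch: no typedef, and a const table the linker can put in
 * read-only memory so it cannot be modified at run time.
 */
struct example_freq {
	unsigned int khz;
	int vmin;
	int vmax;
};

static const struct example_freq example_table[] = {
	{  99500,  900000, 1705000 },
	{ 208000, 1180000, 1705000 },
};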
@ -27,11 +27,11 @@

/**
 * struct cpu_data
 * @parent: the parent node of cpu clock
 * @pclk: the parent clock of cpu
 * @table: frequency table
 */
struct cpu_data {
	struct device_node *parent;
	struct clk **pclk;
	struct cpufreq_frequency_table *table;
};

@ -196,7 +196,7 @@ static void freq_table_sort(struct cpufreq_frequency_table *freq_table,

static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	struct device_node *np;
	struct device_node *np, *pnode;
	int i, count, ret;
	u32 freq, mask;
	struct clk *clk;
@ -219,17 +219,23 @@ static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
		goto err_nomem2;
	}

	data->parent = of_parse_phandle(np, "clocks", 0);
	if (!data->parent) {
	pnode = of_parse_phandle(np, "clocks", 0);
	if (!pnode) {
		pr_err("%s: could not get clock information\n", __func__);
		goto err_nomem2;
	}

	count = of_property_count_strings(data->parent, "clock-names");
	count = of_property_count_strings(pnode, "clock-names");
	data->pclk = kcalloc(count, sizeof(struct clk *), GFP_KERNEL);
	if (!data->pclk) {
		pr_err("%s: no memory\n", __func__);
		goto err_node;
	}

	table = kcalloc(count + 1, sizeof(*table), GFP_KERNEL);
	if (!table) {
		pr_err("%s: no memory\n", __func__);
		goto err_node;
		goto err_pclk;
	}

	if (fmask)
@ -238,7 +244,8 @@ static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
		mask = 0x0;

	for (i = 0; i < count; i++) {
		clk = of_clk_get(data->parent, i);
		clk = of_clk_get(pnode, i);
		data->pclk[i] = clk;
		freq = clk_get_rate(clk);
		/*
		 * the clock is valid if its frequency is not masked
@ -273,13 +280,16 @@ static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
	policy->cpuinfo.transition_latency = u64temp + 1;

	of_node_put(np);
	of_node_put(pnode);

	return 0;

err_nomem1:
	kfree(table);
err_pclk:
	kfree(data->pclk);
err_node:
	of_node_put(data->parent);
	of_node_put(pnode);
err_nomem2:
	policy->driver_data = NULL;
	kfree(data);
@ -293,7 +303,7 @@ static int __exit qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct cpu_data *data = policy->driver_data;

	of_node_put(data->parent);
	kfree(data->pclk);
	kfree(data->table);
	kfree(data);
	policy->driver_data = NULL;
@ -307,7 +317,7 @@ static int qoriq_cpufreq_target(struct cpufreq_policy *policy,
	struct clk *parent;
	struct cpu_data *data = policy->driver_data;

	parent = of_clk_get(data->parent, data->table[index].driver_data);
	parent = data->pclk[data->table[index].driver_data];
	return clk_set_parent(policy->clk, parent);
}
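
The speedup promised by "optimize the CPU frequency switching time" comes from this restructuring: every parent clock is resolved once in qoriq_cpufreq_cpu_init() and cached in data->pclk, so qoriq_cpufreq_target() becomes an array lookup instead of an of_clk_get() device-tree walk on every transition. A hedged sketch of the general pattern, with illustrative names:

/*
 * Hedged sketch: resolve handles once at init, keep the hot path a
 * plain array lookup. 'freq_ctx' and 'freq_ctx_switch' are
 * illustrative, not the driver's exact names.
 */
#include <linux/clk.h>

struct freq_ctx {
	struct clk **pclk;	/* filled once, one entry per parent clock */
};

/* hot path: no device-tree lookup, just an index into the cache */
static int freq_ctx_switch(struct freq_ctx *ctx, struct clk *cpu_clk,
			   unsigned int index)
{
	return clk_set_parent(cpu_clk, ctx->pclk[index]);
}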
@ -65,7 +65,9 @@ struct cpufreq_policy {

	unsigned int		shared_type; /* ACPI: ANY or ALL affected CPUs
						should set cpufreq */
	unsigned int		cpu;    /* cpu nr of CPU managing this policy */
	unsigned int		cpu;    /* cpu managing this policy, must be online */
	unsigned int		kobj_cpu; /* cpu managing sysfs files, can be offline */

	struct clk		*clk;
	struct cpufreq_cpuinfo	cpuinfo;/* see above */

@ -80,6 +82,7 @@ struct cpufreq_policy {
	struct cpufreq_governor	*governor; /* see below */
	void			*governor_data;
	bool			governor_enabled; /* governor start/stop flag */
	char			last_governor[CPUFREQ_NAME_LEN]; /* last governor used */

	struct work_struct	update; /* if update_policy() needs to be
					 * called, but you're in IRQ context */
@ -42,45 +42,54 @@ TRACE_EVENT(pstate_sample,

	TP_PROTO(u32 core_busy,
		u32 scaled_busy,
		u32 state,
		u32 from,
		u32 to,
		u64 mperf,
		u64 aperf,
		u64 tsc,
		u32 freq
		),

	TP_ARGS(core_busy,
		scaled_busy,
		state,
		from,
		to,
		mperf,
		aperf,
		tsc,
		freq
		),

	TP_STRUCT__entry(
		__field(u32, core_busy)
		__field(u32, scaled_busy)
		__field(u32, state)
		__field(u32, from)
		__field(u32, to)
		__field(u64, mperf)
		__field(u64, aperf)
		__field(u64, tsc)
		__field(u32, freq)

		),
		),

	TP_fast_assign(
		__entry->core_busy = core_busy;
		__entry->scaled_busy = scaled_busy;
		__entry->state = state;
		__entry->from = from;
		__entry->to = to;
		__entry->mperf = mperf;
		__entry->aperf = aperf;
		__entry->tsc = tsc;
		__entry->freq = freq;
		),

	TP_printk("core_busy=%lu scaled=%lu state=%lu mperf=%llu aperf=%llu freq=%lu ",
	TP_printk("core_busy=%lu scaled=%lu from=%lu to=%lu mperf=%llu aperf=%llu tsc=%llu freq=%lu ",
		(unsigned long)__entry->core_busy,
		(unsigned long)__entry->scaled_busy,
		(unsigned long)__entry->state,
		(unsigned long)__entry->from,
		(unsigned long)__entry->to,
		(unsigned long long)__entry->mperf,
		(unsigned long long)__entry->aperf,
		(unsigned long long)__entry->tsc,
		(unsigned long)__entry->freq
		)
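
With from, to and tsc in place, each pstate_sample event now records the complete P-state transition rather than a single state value. Assuming tracefs is mounted at /sys/kernel/debug/tracing, the event can be watched in the usual way; the sample output line below is illustrative, following the TP_printk format above:

	# echo 1 > /sys/kernel/debug/tracing/events/power/pstate_sample/enable
	# cat /sys/kernel/debug/tracing/trace_pipe
	core_busy=95 scaled=120 from=16 to=20 mperf=18432 aperf=23040 tsc=26000000 freq=2900000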