d417e0691a
By design, cpufreq drivers are responsible for calling cpufreq_frequency_table_cpuinfo() from their ->init() callbacks to validate the frequency table. However, if a cpufreq driver is buggy and fails to do so properly, it may lead to unexpected behavior of the driver or the cpufreq core at a later point in time. It would be better if the core could validate the frequency table during driver initialization.

To that end, introduce cpufreq_table_validate_and_sort() and make the cpufreq core call it right after invoking the ->init() callback of the driver, and destroy the cpufreq policy if the table is invalid.

For the time being the validation of the table happens twice, once from the driver and then from the core. The individual drivers will be updated separately to drop table validation if they don't need it for other reasons.

The frequency table is now marked "sorted" or "unsorted" by the new helper instead of in cpufreq_table_validate_and_show(), as that should only be done after validating the table (which the drivers won't do going forward).

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
[ rjw: Subject/changelog ]
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
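For context, the call site this change adds to the cpufreq core would look roughly like the fragment below. This is an illustrative sketch only, not part of this file: the surrounding error labels and the exact shape of the core's policy-initialization path in cpufreq.c are assumptions here.

	/*
	 * Sketch of the intended use in the core (assumed surroundings,
	 * not taken from this file): validate the table the driver set up
	 * right after ->init() and tear the policy down if it is invalid.
	 */
	ret = cpufreq_driver->init(policy);	/* driver may fill policy->freq_table */
	if (ret)
		goto out_free_policy;

	/* Core-side validation; also marks the table sorted/unsorted. */
	ret = cpufreq_table_validate_and_sort(policy);
	if (ret)
		goto out_exit_policy;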
/*
 * linux/drivers/cpufreq/freq_table.c
 *
 * Copyright (C) 2002 - 2003 Dominik Brodowski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpufreq.h>
#include <linux/module.h>

/*********************************************************************
 *                     FREQUENCY TABLE HELPERS                       *
 *********************************************************************/

bool policy_has_boost_freq(struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *pos, *table = policy->freq_table;

	if (!table)
		return false;

	cpufreq_for_each_valid_entry(pos, table)
		if (pos->flags & CPUFREQ_BOOST_FREQ)
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(policy_has_boost_freq);

int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
				    struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	unsigned int min_freq = ~0;
	unsigned int max_freq = 0;
	unsigned int freq;

	cpufreq_for_each_valid_entry(pos, table) {
		freq = pos->frequency;

		if (!cpufreq_boost_enabled()
		    && (pos->flags & CPUFREQ_BOOST_FREQ))
			continue;

		pr_debug("table entry %u: %u kHz\n", (int)(pos - table), freq);
		if (freq < min_freq)
			min_freq = freq;
		if (freq > max_freq)
			max_freq = freq;
	}

	policy->min = policy->cpuinfo.min_freq = min_freq;
	policy->max = policy->cpuinfo.max_freq = max_freq;

	if (policy->min == ~0)
		return -EINVAL;
	else
		return 0;
}

int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
				   struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	unsigned int freq, next_larger = ~0;
	bool found = false;

	pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
		 policy->min, policy->max, policy->cpu);

	cpufreq_verify_within_cpu_limits(policy);

	cpufreq_for_each_valid_entry(pos, table) {
		freq = pos->frequency;

		if ((freq >= policy->min) && (freq <= policy->max)) {
			found = true;
			break;
		}

		if ((next_larger > freq) && (freq > policy->max))
			next_larger = freq;
	}

	if (!found) {
		policy->max = next_larger;
		cpufreq_verify_within_cpu_limits(policy);
	}

	pr_debug("verification led to (%u - %u kHz) for cpu %u\n",
		 policy->min, policy->max, policy->cpu);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);

/*
 * Generic routine to verify policy & frequency table, requires driver to set
 * policy->freq_table prior to it.
 */
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
{
	if (!policy->freq_table)
		return -ENODEV;

	return cpufreq_frequency_table_verify(policy, policy->freq_table);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);

int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
				 unsigned int target_freq,
				 unsigned int relation)
{
	struct cpufreq_frequency_table optimal = {
		.driver_data = ~0,
		.frequency = 0,
	};
	struct cpufreq_frequency_table suboptimal = {
		.driver_data = ~0,
		.frequency = 0,
	};
	struct cpufreq_frequency_table *pos;
	struct cpufreq_frequency_table *table = policy->freq_table;
	unsigned int freq, diff, i = 0;
	int index;

	pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
		 target_freq, relation, policy->cpu);

	switch (relation) {
	case CPUFREQ_RELATION_H:
		suboptimal.frequency = ~0;
		break;
	case CPUFREQ_RELATION_L:
	case CPUFREQ_RELATION_C:
		optimal.frequency = ~0;
		break;
	}

	cpufreq_for_each_valid_entry_idx(pos, table, i) {
		freq = pos->frequency;

		if ((freq < policy->min) || (freq > policy->max))
			continue;
		if (freq == target_freq) {
			optimal.driver_data = i;
			break;
		}
		switch (relation) {
		case CPUFREQ_RELATION_H:
			if (freq < target_freq) {
				if (freq >= optimal.frequency) {
					optimal.frequency = freq;
					optimal.driver_data = i;
				}
			} else {
				if (freq <= suboptimal.frequency) {
					suboptimal.frequency = freq;
					suboptimal.driver_data = i;
				}
			}
			break;
		case CPUFREQ_RELATION_L:
			if (freq > target_freq) {
				if (freq <= optimal.frequency) {
					optimal.frequency = freq;
					optimal.driver_data = i;
				}
			} else {
				if (freq >= suboptimal.frequency) {
					suboptimal.frequency = freq;
					suboptimal.driver_data = i;
				}
			}
			break;
		case CPUFREQ_RELATION_C:
			diff = abs(freq - target_freq);
			if (diff < optimal.frequency ||
			    (diff == optimal.frequency &&
			     freq > table[optimal.driver_data].frequency)) {
				optimal.frequency = diff;
				optimal.driver_data = i;
			}
			break;
		}
	}
	if (optimal.driver_data > i) {
		if (suboptimal.driver_data > i) {
			WARN(1, "Invalid frequency table: %d\n", policy->cpu);
			return 0;
		}

		index = suboptimal.driver_data;
	} else
		index = optimal.driver_data;

	pr_debug("target index is %u, freq is:%u kHz\n", index,
		 table[index].frequency);

	return index;
}
EXPORT_SYMBOL_GPL(cpufreq_table_index_unsorted);

int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
				      unsigned int freq)
{
	struct cpufreq_frequency_table *pos, *table = policy->freq_table;
	int idx;

	if (unlikely(!table)) {
		pr_debug("%s: Unable to find frequency table\n", __func__);
		return -ENOENT;
	}

	cpufreq_for_each_valid_entry_idx(pos, table, idx)
		if (pos->frequency == freq)
			return idx;

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_index);

/**
 * show_available_freqs - show available frequencies for the specified CPU
 */
static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
				    bool show_boost)
{
	ssize_t count = 0;
	struct cpufreq_frequency_table *pos, *table = policy->freq_table;

	if (!table)
		return -ENODEV;

	cpufreq_for_each_valid_entry(pos, table) {
		/*
		 * show_boost = true and driver_data = BOOST freq
		 * display BOOST freqs
		 *
		 * show_boost = false and driver_data = BOOST freq
		 * show_boost = true and driver_data != BOOST freq
		 * continue - do not display anything
		 *
		 * show_boost = false and driver_data != BOOST freq
		 * display NON BOOST freqs
		 */
		if (show_boost ^ (pos->flags & CPUFREQ_BOOST_FREQ))
			continue;

		count += sprintf(&buf[count], "%d ", pos->frequency);
	}
	count += sprintf(&buf[count], "\n");

	return count;
}

#define cpufreq_attr_available_freq(_name)			\
struct freq_attr cpufreq_freq_attr_##_name##_freqs =		\
__ATTR_RO(_name##_frequencies)

/**
 * scaling_available_frequencies_show - show available normal frequencies for
 * the specified CPU
 */
static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy,
						  char *buf)
{
	return show_available_freqs(policy, buf, false);
}
cpufreq_attr_available_freq(scaling_available);
EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);

/**
 * scaling_boost_frequencies_show - show available boost frequencies for
 * the specified CPU
 */
static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy,
					      char *buf)
{
	return show_available_freqs(policy, buf, true);
}
cpufreq_attr_available_freq(scaling_boost);
EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_boost_freqs);

struct freq_attr *cpufreq_generic_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
#ifdef CONFIG_CPU_FREQ_BOOST_SW
	&cpufreq_freq_attr_scaling_boost_freqs,
#endif
	NULL,
};
EXPORT_SYMBOL_GPL(cpufreq_generic_attr);

static int set_freq_table_sorted(struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *pos, *table = policy->freq_table;
	struct cpufreq_frequency_table *prev = NULL;
	int ascending = 0;

	policy->freq_table_sorted = CPUFREQ_TABLE_UNSORTED;

	cpufreq_for_each_valid_entry(pos, table) {
		if (!prev) {
			prev = pos;
			continue;
		}

		if (pos->frequency == prev->frequency) {
			pr_warn("Duplicate freq-table entries: %u\n",
				pos->frequency);
			return -EINVAL;
		}

		/* Frequency increased from prev to pos */
		if (pos->frequency > prev->frequency) {
			/* But frequency was decreasing earlier */
			if (ascending < 0) {
				pr_debug("Freq table is unsorted\n");
				return 0;
			}

			ascending++;
		} else {
			/* Frequency decreased from prev to pos */

			/* But frequency was increasing earlier */
			if (ascending > 0) {
				pr_debug("Freq table is unsorted\n");
				return 0;
			}

			ascending--;
		}

		prev = pos;
	}

	if (ascending > 0)
		policy->freq_table_sorted = CPUFREQ_TABLE_SORTED_ASCENDING;
	else
		policy->freq_table_sorted = CPUFREQ_TABLE_SORTED_DESCENDING;

	pr_debug("Freq table is sorted in %s order\n",
		 ascending > 0 ? "ascending" : "descending");

	return 0;
}

int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
				    struct cpufreq_frequency_table *table)
{
	int ret;

	ret = cpufreq_frequency_table_cpuinfo(policy, table);
	if (ret)
		return ret;

	policy->freq_table = table;
	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show);

int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy->freq_table)
		return 0;

	ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
	if (ret)
		return ret;

	return set_freq_table_sorted(policy);
}

MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("CPUfreq frequency table helpers");
MODULE_LICENSE("GPL");