Merge branch 'pm-cpufreq'

Merge cpufreq updates for 5.19-rc1:

 - Fix cpufreq governor cleanup code to avoid using kfree() directly
   to free kobject-based items (Kevin Hao).

 - Prepare cpufreq for powerpc's asm/prom.h cleanup (Christophe Leroy).

 - Make intel_pstate notify the frequency invariance code when no_turbo
   is turned on and off (Chen Yu).

 - Add Sapphire Rapids OOB mode support to intel_pstate (Srinivas
   Pandruvada).

 - Make cpufreq avoid unnecessary frequency updates due to a mismatch
   between the frequency reported by hardware and the frequency table
   (Viresh Kumar).

 - Make remove_cpu_dev_symlink() clear the real_cpus mask to simplify
   code (Viresh Kumar).

 - Rearrange cpufreq_offline() and cpufreq_remove_dev() to make the
   calling convention for some driver callbacks consistent (Rafael
   Wysocki).

 - Avoid accessing half-initialized cpufreq policies from the show()
   and store() sysfs functions (Schspa Shi).

 - Rearrange cpufreq_offline() to make the calling convention for some
   driver callbacks consistent (Schspa Shi).

 - Update CPPC handling in cpufreq (Pierre Gondois):

   * Add per_cpu efficiency_class to the CPPC driver.
   * Make the CPPC driver register an EM based on efficiency class
     information (a brief cost-model sketch follows this list).
   * Adjust _OSC for flexible address space in the ACPI platform
     initialization code and always set CPPC _OSC bits if CPPC_LIB is
     supported.
   * Assume no transition latency if no PCCT in the CPPC driver.
   * Add fast_switch and dvfs_possible_from_any_cpu support to the CPPC
     driver.
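
For illustration only (not part of the series): a minimal userspace sketch of
the artificial energy-model cost computation that the CPPC driver registers.
The constants and compute_cost() mirror the cppc_cpufreq.c hunks below;
SCHED_CAPACITY_SCALE is assumed to be 1024 and the efficiency_class[] array is
a stand-in for the kernel's per-CPU variable.

/*
 * Illustrative sketch of the artificial EM cost model, not the driver code.
 * Assumptions: SCHED_CAPACITY_SCALE == 1024, efficiency_class[] stands in
 * for the kernel's per-CPU efficiency_class variable.
 */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE    1024
#define CPPC_EM_CAP_STEP        (20)    /* one perf state per 20 capacity units */
#define CPPC_EM_COST_STEP       (1)     /* cost grows by 1 per perf state */
#define CPPC_EM_COST_GAP        (4 * SCHED_CAPACITY_SCALE * CPPC_EM_COST_STEP \
                                 / CPPC_EM_CAP_STEP)

static int efficiency_class[] = { 0, 0, 1, 1 };  /* hypothetical 4-CPU system */

static unsigned long compute_cost(int cpu, int step)
{
        /* Class gap dominates, then a small per-state increment. */
        return CPPC_EM_COST_GAP * efficiency_class[cpu] +
               step * CPPC_EM_COST_STEP;
}

int main(void)
{
        int cpu, step;

        /*
         * CPUs in class 0 get costs 0, 1, 2, ...; CPUs in class 1 start at
         * CPPC_EM_COST_GAP = 4 * 1024 / 20 = 204.  With at most about
         * 1024 / 20 = 51 perf states per CPU, every state of a higher
         * (less efficient) class index costs more than any state of a
         * lower one.
         */
        for (cpu = 0; cpu < 4; cpu++)
                for (step = 0; step <= 2; step++)
                        printf("cpu%d step%d cost=%lu\n",
                               cpu, step, compute_cost(cpu, step));
        return 0;
}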

* pm-cpufreq:
  cpufreq: CPPC: Enable dvfs_possible_from_any_cpu
  cpufreq: CPPC: Enable fast_switch
  ACPI: CPPC: Assume no transition latency if no PCCT
  ACPI: bus: Set CPPC _OSC bits for all and when CPPC_LIB is supported
  ACPI: CPPC: Check _OSC for flexible address space
  cpufreq: make interface functions and lock holding state clear
  cpufreq: Abort show()/store() for half-initialized policies
  cpufreq: Rearrange locking in cpufreq_remove_dev()
  cpufreq: Split cpufreq_offline()
  cpufreq: Reorganize checks in cpufreq_offline()
  cpufreq: Clear real_cpus mask from remove_cpu_dev_symlink()
  cpufreq: intel_pstate: Support Sapphire Rapids OOB mode
  Revert "cpufreq: Fix possible race in cpufreq online error path"
  cpufreq: CPPC: Register EM based on efficiency class information
  cpufreq: CPPC: Add per_cpu efficiency_class
  cpufreq: Avoid unnecessary frequency updates due to mismatch
  cpufreq: Fix possible race in cpufreq online error path
  cpufreq: intel_pstate: Handle no_turbo in frequency invariance
  cpufreq: Prepare cleanup of powerpc's asm/prom.h
  cpufreq: governor: Use kobject release() method to free dbs_data
Rafael J. Wysocki 2022-05-23 19:28:41 +02:00
commit d988c91342
15 changed files with 376 additions and 64 deletions


@ -512,6 +512,7 @@ struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu)
{
return &cpu_madt_gicc[cpu];
}
EXPORT_SYMBOL_GPL(acpi_cpu_get_madt_gicc);
/*
* acpi_map_gic_cpu_interface - parse processor MADT entry


@ -278,6 +278,20 @@ bool osc_sb_apei_support_acked;
bool osc_pc_lpi_support_confirmed;
EXPORT_SYMBOL_GPL(osc_pc_lpi_support_confirmed);
/*
* ACPI 6.2 Section 6.2.11.2 'Platform-Wide OSPM Capabilities':
* Starting with ACPI Specification 6.2, all _CPC registers can be in
* PCC, System Memory, System IO, or Functional Fixed Hardware address
* spaces. OSPM support for this more flexible register space scheme is
* indicated by the Flexible Address Space for CPPC Registers _OSC bit.
*
* Otherwise (cf ACPI 6.1, s8.4.7.1.1.X), _CPC registers must be in:
* - PCC or Functional Fixed Hardware address space if defined
* - SystemMemory address space (NULL register) if not defined
*/
bool osc_cpc_flexible_adr_space_confirmed;
EXPORT_SYMBOL_GPL(osc_cpc_flexible_adr_space_confirmed);
/*
* ACPI 6.4 Operating System Capabilities for USB.
*/
@ -315,12 +329,15 @@ static void acpi_bus_osc_negotiate_platform_control(void)
#endif
#ifdef CONFIG_X86
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_GENERIC_INITIATOR_SUPPORT;
if (boot_cpu_has(X86_FEATURE_HWP)) {
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPC_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPCV2_SUPPORT;
}
#endif
#ifdef CONFIG_ACPI_CPPC_LIB
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPC_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPCV2_SUPPORT;
#endif
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPC_FLEXIBLE_ADR_SPACE;
if (IS_ENABLED(CONFIG_SCHED_MC_PRIO))
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPC_DIVERSE_HIGH_SUPPORT;
@ -341,10 +358,9 @@ static void acpi_bus_osc_negotiate_platform_control(void)
return;
}
#ifdef CONFIG_X86
if (boot_cpu_has(X86_FEATURE_HWP))
osc_sb_cppc_not_supported = !(capbuf_ret[OSC_SUPPORT_DWORD] &
(OSC_SB_CPC_SUPPORT | OSC_SB_CPCV2_SUPPORT));
#ifdef CONFIG_ACPI_CPPC_LIB
osc_sb_cppc_not_supported = !(capbuf_ret[OSC_SUPPORT_DWORD] &
(OSC_SB_CPC_SUPPORT | OSC_SB_CPCV2_SUPPORT));
#endif
/*
@ -366,6 +382,8 @@ static void acpi_bus_osc_negotiate_platform_control(void)
capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
osc_sb_native_usb4_support_confirmed =
capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
osc_cpc_flexible_adr_space_confirmed =
capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_CPC_FLEXIBLE_ADR_SPACE;
}
kfree(context.ret.pointer);


@ -100,6 +100,16 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
(cpc)->cpc_entry.reg.space_id == \
ACPI_ADR_SPACE_PLATFORM_COMM)
/* Check if a CPC register is in SystemMemory */
#define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
(cpc)->cpc_entry.reg.space_id == \
ACPI_ADR_SPACE_SYSTEM_MEMORY)
/* Check if a CPC register is in SystemIo */
#define CPC_IN_SYSTEM_IO(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
(cpc)->cpc_entry.reg.space_id == \
ACPI_ADR_SPACE_SYSTEM_IO)
/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
(reg)->address == 0 && \
@ -424,6 +434,24 @@ bool acpi_cpc_valid(void)
}
EXPORT_SYMBOL_GPL(acpi_cpc_valid);
bool cppc_allow_fast_switch(void)
{
struct cpc_register_resource *desired_reg;
struct cpc_desc *cpc_ptr;
int cpu;
for_each_possible_cpu(cpu) {
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
!CPC_IN_SYSTEM_IO(desired_reg))
return false;
}
return true;
}
EXPORT_SYMBOL_GPL(cppc_allow_fast_switch);
/**
* acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
* @cpu: Find all CPUs that share a domain with cpu.
@ -736,6 +764,11 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
if (gas_t->address) {
void __iomem *addr;
if (!osc_cpc_flexible_adr_space_confirmed) {
pr_debug("Flexible address space capability not supported\n");
goto out_free;
}
addr = ioremap(gas_t->address, gas_t->bit_width/8);
if (!addr)
goto out_free;
@ -758,6 +791,10 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
gas_t->address);
goto out_free;
}
if (!osc_cpc_flexible_adr_space_confirmed) {
pr_debug("Flexible address space capability not supported\n");
goto out_free;
}
} else {
if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
/* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */
@ -1447,6 +1484,9 @@ EXPORT_SYMBOL_GPL(cppc_set_perf);
* transition latency for performance change requests. The closest we have
* is the timing information from the PCCT tables which provides the info
* on the number and frequency of PCC commands the platform can handle.
*
* If desired_reg is in the SystemMemory or SystemIo ACPI address space,
* then assume there is no latency.
*/
unsigned int cppc_get_transition_latency(int cpu_num)
{
@ -1472,7 +1512,9 @@ unsigned int cppc_get_transition_latency(int cpu_num)
return CPUFREQ_ETERNAL;
desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
if (!CPC_IN_PCC(desired_reg))
if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg))
return 0;
else if (!CPC_IN_PCC(desired_reg))
return CPUFREQ_ETERNAL;
if (pcc_ss_id < 0)


@ -389,6 +389,27 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
return ret;
}
static unsigned int cppc_cpufreq_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cppc_cpudata *cpu_data = policy->driver_data;
unsigned int cpu = policy->cpu;
u32 desired_perf;
int ret;
desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
cpu_data->perf_ctrls.desired_perf = desired_perf;
ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
if (ret) {
pr_debug("Failed to set target on CPU:%d. ret:%d\n",
cpu, ret);
return 0;
}
return target_freq;
}
static int cppc_verify_policy(struct cpufreq_policy_data *policy)
{
cpufreq_verify_within_cpu_limits(policy);
@ -420,12 +441,197 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}
static DEFINE_PER_CPU(unsigned int, efficiency_class);
static void cppc_cpufreq_register_em(struct cpufreq_policy *policy);
/* Create an artificial performance state every CPPC_EM_CAP_STEP capacity unit. */
#define CPPC_EM_CAP_STEP (20)
/* Increase the cost value by CPPC_EM_COST_STEP every performance state. */
#define CPPC_EM_COST_STEP (1)
/* Add a cost gap corresponding to the energy of 4 CPUs. */
#define CPPC_EM_COST_GAP (4 * SCHED_CAPACITY_SCALE * CPPC_EM_COST_STEP \
/ CPPC_EM_CAP_STEP)
static unsigned int get_perf_level_count(struct cpufreq_policy *policy)
{
struct cppc_perf_caps *perf_caps;
unsigned int min_cap, max_cap;
struct cppc_cpudata *cpu_data;
int cpu = policy->cpu;
cpu_data = policy->driver_data;
perf_caps = &cpu_data->perf_caps;
max_cap = arch_scale_cpu_capacity(cpu);
min_cap = div_u64(max_cap * perf_caps->lowest_perf, perf_caps->highest_perf);
if ((min_cap == 0) || (max_cap < min_cap))
return 0;
return 1 + max_cap / CPPC_EM_CAP_STEP - min_cap / CPPC_EM_CAP_STEP;
}
/*
* The cost is defined as:
* cost = power * max_frequency / frequency
*/
static inline unsigned long compute_cost(int cpu, int step)
{
return CPPC_EM_COST_GAP * per_cpu(efficiency_class, cpu) +
step * CPPC_EM_COST_STEP;
}
static int cppc_get_cpu_power(struct device *cpu_dev,
unsigned long *power, unsigned long *KHz)
{
unsigned long perf_step, perf_prev, perf, perf_check;
unsigned int min_step, max_step, step, step_check;
unsigned long prev_freq = *KHz;
unsigned int min_cap, max_cap;
struct cpufreq_policy *policy;
struct cppc_perf_caps *perf_caps;
struct cppc_cpudata *cpu_data;
policy = cpufreq_cpu_get_raw(cpu_dev->id);
cpu_data = policy->driver_data;
perf_caps = &cpu_data->perf_caps;
max_cap = arch_scale_cpu_capacity(cpu_dev->id);
min_cap = div_u64(max_cap * perf_caps->lowest_perf,
perf_caps->highest_perf);
perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
min_step = min_cap / CPPC_EM_CAP_STEP;
max_step = max_cap / CPPC_EM_CAP_STEP;
perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
step = perf_prev / perf_step;
if (step > max_step)
return -EINVAL;
if (min_step == max_step) {
step = max_step;
perf = perf_caps->highest_perf;
} else if (step < min_step) {
step = min_step;
perf = perf_caps->lowest_perf;
} else {
step++;
if (step == max_step)
perf = perf_caps->highest_perf;
else
perf = step * perf_step;
}
*KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
step_check = perf_check / perf_step;
/*
* To avoid bad integer approximation, check that new frequency value
* increased and that the new frequency will be converted to the
* desired step value.
*/
while ((*KHz == prev_freq) || (step_check != step)) {
perf++;
*KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
step_check = perf_check / perf_step;
}
/*
* With an artificial EM, only the cost value is used. Still the power
* is populated such that 0 < power < EM_MAX_POWER. This gives more
* meaning to the artificial performance states.
*/
*power = compute_cost(cpu_dev->id, step);
return 0;
}
static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
unsigned long *cost)
{
unsigned long perf_step, perf_prev;
struct cppc_perf_caps *perf_caps;
struct cpufreq_policy *policy;
struct cppc_cpudata *cpu_data;
unsigned int max_cap;
int step;
policy = cpufreq_cpu_get_raw(cpu_dev->id);
cpu_data = policy->driver_data;
perf_caps = &cpu_data->perf_caps;
max_cap = arch_scale_cpu_capacity(cpu_dev->id);
perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, KHz);
perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
step = perf_prev / perf_step;
*cost = compute_cost(cpu_dev->id, step);
return 0;
}
static int populate_efficiency_class(void)
{
struct acpi_madt_generic_interrupt *gicc;
DECLARE_BITMAP(used_classes, 256) = {};
int class, cpu, index;
for_each_possible_cpu(cpu) {
gicc = acpi_cpu_get_madt_gicc(cpu);
class = gicc->efficiency_class;
bitmap_set(used_classes, class, 1);
}
if (bitmap_weight(used_classes, 256) <= 1) {
pr_debug("Efficiency classes are all equal (=%d). "
"No EM registered", class);
return -EINVAL;
}
/*
* Squeeze efficiency class values on [0:#efficiency_class-1].
* Values are per spec in [0:255].
*/
index = 0;
for_each_set_bit(class, used_classes, 256) {
for_each_possible_cpu(cpu) {
gicc = acpi_cpu_get_madt_gicc(cpu);
if (gicc->efficiency_class == class)
per_cpu(efficiency_class, cpu) = index;
}
index++;
}
cppc_cpufreq_driver.register_em = cppc_cpufreq_register_em;
return 0;
}
static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
{
struct cppc_cpudata *cpu_data;
struct em_data_callback em_cb =
EM_ADV_DATA_CB(cppc_get_cpu_power, cppc_get_cpu_cost);
cpu_data = policy->driver_data;
em_dev_register_perf_domain(get_cpu_device(policy->cpu),
get_perf_level_count(policy), &em_cb,
cpu_data->shared_cpu_map, 0);
}
#else
static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}
static int populate_efficiency_class(void)
{
return 0;
}
static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
{
}
#endif
@ -536,6 +742,9 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
goto out;
}
policy->fast_switch_possible = cppc_allow_fast_switch();
policy->dvfs_possible_from_any_cpu = true;
/*
* If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
* is supported.
@ -681,6 +890,7 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
.verify = cppc_verify_policy,
.target = cppc_cpufreq_set_target,
.get = cppc_cpufreq_get_rate,
.fast_switch = cppc_cpufreq_fast_switch,
.init = cppc_cpufreq_cpu_init,
.exit = cppc_cpufreq_cpu_exit,
.set_boost = cppc_cpufreq_set_boost,
@ -742,6 +952,7 @@ static int __init cppc_cpufreq_init(void)
cppc_check_hisi_workaround();
cppc_freq_invariance_init();
populate_efficiency_class();
ret = cpufreq_register_driver(&cppc_cpufreq_driver);
if (ret)


@ -28,6 +28,7 @@
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <linux/units.h>
#include <trace/events/power.h>
static LIST_HEAD(cpufreq_policy_list);
@ -947,13 +948,14 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret;
ssize_t ret = -EBUSY;
if (!fattr->show)
return -EIO;
down_read(&policy->rwsem);
ret = fattr->show(policy, buf);
if (likely(!policy_is_inactive(policy)))
ret = fattr->show(policy, buf);
up_read(&policy->rwsem);
return ret;
@ -964,7 +966,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
{
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
ssize_t ret = -EBUSY;
if (!fattr->store)
return -EIO;
@ -978,7 +980,8 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
if (cpu_online(policy->cpu)) {
down_write(&policy->rwsem);
ret = fattr->store(policy, buf, count);
if (likely(!policy_is_inactive(policy)))
ret = fattr->store(policy, buf, count);
up_write(&policy->rwsem);
}
@ -1019,11 +1022,12 @@ static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
dev_err(dev, "cpufreq symlink creation failed\n");
}
static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu,
struct device *dev)
{
dev_dbg(dev, "%s: Removing symlink\n", __func__);
sysfs_remove_link(&dev->kobj, "cpufreq");
cpumask_clear_cpu(cpu, policy->real_cpus);
}
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
@ -1337,12 +1341,12 @@ static int cpufreq_online(unsigned int cpu)
down_write(&policy->rwsem);
policy->cpu = cpu;
policy->governor = NULL;
up_write(&policy->rwsem);
} else {
new_policy = true;
policy = cpufreq_policy_alloc(cpu);
if (!policy)
return -ENOMEM;
down_write(&policy->rwsem);
}
if (!new_policy && cpufreq_driver->online) {
@ -1382,7 +1386,6 @@ static int cpufreq_online(unsigned int cpu)
cpumask_copy(policy->related_cpus, policy->cpus);
}
down_write(&policy->rwsem);
/*
* affected cpus must always be the one, which are online. We aren't
* managing offline cpus here.
@ -1531,9 +1534,9 @@ static int cpufreq_online(unsigned int cpu)
out_destroy_policy:
for_each_cpu(j, policy->real_cpus)
remove_cpu_dev_symlink(policy, get_cpu_device(j));
remove_cpu_dev_symlink(policy, j, get_cpu_device(j));
up_write(&policy->rwsem);
cpumask_clear(policy->cpus);
out_offline_policy:
if (cpufreq_driver->offline)
@ -1544,6 +1547,8 @@ out_exit_policy:
cpufreq_driver->exit(policy);
out_free_policy:
up_write(&policy->rwsem);
cpufreq_policy_free(policy);
return ret;
}
@ -1575,47 +1580,36 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
return 0;
}
static int cpufreq_offline(unsigned int cpu)
static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
{
struct cpufreq_policy *policy;
int ret;
pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
policy = cpufreq_cpu_get_raw(cpu);
if (!policy) {
pr_debug("%s: No cpu_data found\n", __func__);
return 0;
}
down_write(&policy->rwsem);
if (has_target())
cpufreq_stop_governor(policy);
cpumask_clear_cpu(cpu, policy->cpus);
if (policy_is_inactive(policy)) {
if (has_target())
strncpy(policy->last_governor, policy->governor->name,
CPUFREQ_NAME_LEN);
else
policy->last_policy = policy->policy;
} else if (cpu == policy->cpu) {
/* Nominate new CPU */
policy->cpu = cpumask_any(policy->cpus);
}
/* Start governor again for active policy */
if (!policy_is_inactive(policy)) {
/* Nominate a new CPU if necessary. */
if (cpu == policy->cpu)
policy->cpu = cpumask_any(policy->cpus);
/* Start the governor again for the active policy. */
if (has_target()) {
ret = cpufreq_start_governor(policy);
if (ret)
pr_err("%s: Failed to start governor\n", __func__);
}
goto unlock;
return;
}
if (has_target())
strncpy(policy->last_governor, policy->governor->name,
CPUFREQ_NAME_LEN);
else
policy->last_policy = policy->policy;
if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
cpufreq_cooling_unregister(policy->cdev);
policy->cdev = NULL;
@ -1634,8 +1628,24 @@ static int cpufreq_offline(unsigned int cpu)
cpufreq_driver->exit(policy);
policy->freq_table = NULL;
}
}
static int cpufreq_offline(unsigned int cpu)
{
struct cpufreq_policy *policy;
pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
policy = cpufreq_cpu_get_raw(cpu);
if (!policy) {
pr_debug("%s: No cpu_data found\n", __func__);
return 0;
}
down_write(&policy->rwsem);
__cpufreq_offline(cpu, policy);
unlock:
up_write(&policy->rwsem);
return 0;
}
@ -1653,19 +1663,25 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
if (!policy)
return;
down_write(&policy->rwsem);
if (cpu_online(cpu))
cpufreq_offline(cpu);
__cpufreq_offline(cpu, policy);
cpumask_clear_cpu(cpu, policy->real_cpus);
remove_cpu_dev_symlink(policy, dev);
remove_cpu_dev_symlink(policy, cpu, dev);
if (cpumask_empty(policy->real_cpus)) {
/* We did light-weight exit earlier, do full tear down now */
if (cpufreq_driver->offline)
cpufreq_driver->exit(policy);
cpufreq_policy_free(policy);
if (!cpumask_empty(policy->real_cpus)) {
up_write(&policy->rwsem);
return;
}
/* We did light-weight exit earlier, do full tear down now */
if (cpufreq_driver->offline)
cpufreq_driver->exit(policy);
up_write(&policy->rwsem);
cpufreq_policy_free(policy);
}
/**
@ -1707,6 +1723,16 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b
return new_freq;
if (policy->cur != new_freq) {
/*
* For some platforms, the frequency returned by hardware may be
* slightly different from what is provided in the frequency
* table, for example hardware may return 499 MHz instead of 500
* MHz. In such cases it is better to avoid getting into
* unnecessary frequency updates.
*/
if (abs(policy->cur - new_freq) < HZ_PER_MHZ)
return policy->cur;
cpufreq_out_of_sync(policy, new_freq);
if (update)
schedule_work(&policy->update);


@ -388,6 +388,15 @@ static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
gov->free(policy_dbs);
}
static void cpufreq_dbs_data_release(struct kobject *kobj)
{
struct dbs_data *dbs_data = to_dbs_data(to_gov_attr_set(kobj));
struct dbs_governor *gov = dbs_data->gov;
gov->exit(dbs_data);
kfree(dbs_data);
}
int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
{
struct dbs_governor *gov = dbs_governor_of(policy);
@ -425,6 +434,7 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
goto free_policy_dbs_info;
}
dbs_data->gov = gov;
gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);
ret = gov->init(dbs_data);
@ -447,6 +457,7 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
policy->governor_data = policy_dbs;
gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
gov->kobj_type.release = cpufreq_dbs_data_release;
ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type,
get_governor_parent_kobj(policy),
"%s", gov->gov.name);
@ -488,13 +499,8 @@ void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy)
policy->governor_data = NULL;
if (!count) {
if (!have_governor_per_policy())
gov->gdbs_data = NULL;
gov->exit(dbs_data);
kfree(dbs_data);
}
if (!count && !have_governor_per_policy())
gov->gdbs_data = NULL;
free_policy_dbs_info(policy_dbs, gov);


@ -37,6 +37,7 @@ enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
/* Governor demand based switching data (per-policy or global). */
struct dbs_data {
struct gov_attr_set attr_set;
struct dbs_governor *gov;
void *tuners;
unsigned int ignore_nice_load;
unsigned int sampling_rate;


@ -1322,6 +1322,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
mutex_unlock(&intel_pstate_limits_lock);
intel_pstate_update_policies();
arch_set_max_freq_ratio(global.no_turbo);
mutex_unlock(&intel_pstate_driver_lock);
@ -2424,6 +2425,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
X86_MATCH(BROADWELL_X, core_funcs),
X86_MATCH(SKYLAKE_X, core_funcs),
X86_MATCH(ICELAKE_X, core_funcs),
X86_MATCH(SAPPHIRERAPIDS_X, core_funcs),
{}
};


@ -18,7 +18,6 @@
#include <asm/hw_irq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/time.h>
#include <asm/smp.h>


@ -24,7 +24,7 @@
#include <linux/device.h>
#include <linux/hardirq.h>
#include <linux/of_device.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/pmac_feature.h>


@ -22,7 +22,7 @@
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/sections.h>


@ -12,7 +12,6 @@
#include <linux/of_platform.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/cell-regs.h>
#include "ppc_cbe_cpufreq.h"


@ -13,9 +13,9 @@
#include <linux/init.h>
#include <linux/of_platform.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/prom.h>
#include <asm/pmi.h>
#include <asm/cell-regs.h>


@ -141,6 +141,7 @@ extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
extern int cppc_set_enable(int cpu, bool enable);
extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
extern bool acpi_cpc_valid(void);
extern bool cppc_allow_fast_switch(void);
extern int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data);
extern unsigned int cppc_get_transition_latency(int cpu);
extern bool cpc_ffh_supported(void);
@ -175,6 +176,10 @@ static inline bool acpi_cpc_valid(void)
{
return false;
}
static inline bool cppc_allow_fast_switch(void)
{
return false;
}
static inline unsigned int cppc_get_transition_latency(int cpu)
{
return CPUFREQ_ETERNAL;


@ -574,6 +574,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
#define OSC_SB_OSLPI_SUPPORT 0x00000100
#define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000
#define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00002000
#define OSC_SB_CPC_FLEXIBLE_ADR_SPACE 0x00004000
#define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000
#define OSC_SB_PRM_SUPPORT 0x00200000
@ -581,6 +582,7 @@ extern bool osc_sb_apei_support_acked;
extern bool osc_pc_lpi_support_confirmed;
extern bool osc_sb_native_usb4_support_confirmed;
extern bool osc_sb_cppc_not_supported;
extern bool osc_cpc_flexible_adr_space_confirmed;
/* USB4 Capabilities */
#define OSC_USB_USB3_TUNNELING 0x00000001