// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $)
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS		"processor"
#define ACPI_PROCESSOR_FILE_PERFORMANCE	"performance"
#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_perflib");

static DEFINE_MUTEX(performance_mutex);

/*
 * _PPC support is implemented as a CPUfreq policy notifier:
 * This means each time a CPUfreq driver registered also with
 * the ACPI core is asked to change the speed policy, the maximum
 * value is adjusted so that it is within the platform limit.
 *
 * Also, when a new platform limit value is detected, the CPUfreq
 * policy is adjusted accordingly.
 */

/* ignore_ppc:
 * -1 -> cpufreq low level drivers not initialized -> _PSS, etc. not called yet
 *       ignore _PPC
 *  0 -> cpufreq low level drivers initialized -> consider _PPC values
 *  1 -> ignore _PPC totally -> forced by user through boot param
 */
static int ignore_ppc = -1;
module_param(ignore_ppc, int, 0644);
MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly " \
		 "limited by BIOS, this should help");

static bool acpi_processor_ppc_in_use;

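/*
 * Evaluate _PPC for @pr to learn the highest performance state the platform
 * currently allows (0 = no limit, 1 = states 1..n, and so on), cache it in
 * pr->performance_platform_limit and, when the perflib frequency QoS request
 * is active, push the corresponding kHz ceiling to the cpufreq policy.
 */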
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
	acpi_status status = 0;
	unsigned long long ppc = 0;
	int ret;

	if (!pr)
		return -EINVAL;

	/*
	 * _PPC indicates the maximum state currently supported by the platform
	 * (e.g. 0 = states 0..n; 1 = states 1..n; etc.)
	 */
	status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);

	if (status != AE_NOT_FOUND)
		acpi_processor_ppc_in_use = true;

	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PPC"));
		return -ENODEV;
	}

	pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
		 (int)ppc, ppc ? "" : "not");

	pr->performance_platform_limit = (int)ppc;

	if (ppc >= pr->performance->state_count ||
	    unlikely(!freq_qos_request_active(&pr->perflib_req)))
		return 0;

	ret = freq_qos_update_request(&pr->perflib_req,
			pr->performance->states[ppc].core_frequency * 1000);
	if (ret < 0) {
		pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
			pr->id, ret);
	}

	return 0;
}

#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE	0x80
/*
 * acpi_processor_ppc_ost: Notify firmware the _PPC evaluation status
 * @handle: ACPI processor handle
 * @status: the status code of _PPC evaluation
 *	0: success. OSPM is now using the performance state specified.
 *	1: failure. OSPM has not changed the number of P-states in use
 */
static void acpi_processor_ppc_ost(acpi_handle handle, int status)
{
	if (acpi_has_method(handle, "_OST"))
		acpi_evaluate_ost(handle, ACPI_PROCESSOR_NOTIFY_PERFORMANCE,
				  status, NULL);
}

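/*
 * Handle a platform limit change for @pr: re-evaluate _PPC (unless it is
 * ignored) and, when @event_flag indicates a notification triggered this,
 * report the outcome back to the firmware through _OST.  On success the
 * cpufreq limits for the CPU are refreshed.
 */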
void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
{
	int ret;

	if (ignore_ppc || !pr->performance) {
		/*
		 * The _OST object is evaluated only for notification
		 * events; otherwise it is skipped.
		 */
		if (event_flag)
			acpi_processor_ppc_ost(pr->handle, 1);
		return;
	}

	ret = acpi_processor_get_platform_limit(pr);
	/*
	 * The _OST object is evaluated only for notification events;
	 * otherwise it is skipped.
	 */
	if (event_flag) {
		if (ret < 0)
			acpi_processor_ppc_ost(pr->handle, 1);
		else
			acpi_processor_ppc_ost(pr->handle, 0);
	}
	if (ret >= 0)
		cpufreq_update_limits(pr->id);
}

int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
{
	struct acpi_processor *pr;

	pr = per_cpu(processors, cpu);
	if (!pr || !pr->performance || !pr->performance->state_count)
		return -ENODEV;
	*limit = pr->performance->states[pr->performance_platform_limit].
		core_frequency * 1000;
	return 0;
}
EXPORT_SYMBOL(acpi_processor_get_bios_limit);

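/*
 * Unless the user forced ignore_ppc on the command line, switch it from the
 * initial -1 ("cpufreq low level drivers not initialized") to 0 so that _PPC
 * values are considered from now on.
 */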
void acpi_processor_ignore_ppc_init(void)
{
	if (ignore_ppc < 0)
		ignore_ppc = 0;
}

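/*
 * Add a FREQ_QOS_MAX request on the policy for the _PPC limit.  It starts
 * out at INT_MAX (no limit) and is tightened later by
 * acpi_processor_get_platform_limit().
 */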
void acpi_processor_ppc_init(struct cpufreq_policy *policy)
{
	int cpu = policy->cpu;
	struct acpi_processor *pr = per_cpu(processors, cpu);
	int ret;

	if (!pr)
		return;

	ret = freq_qos_add_request(&policy->constraints, &pr->perflib_req,
				   FREQ_QOS_MAX, INT_MAX);
	if (ret < 0)
		pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
		       ret);
}

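/* Drop the perflib frequency QoS request added in acpi_processor_ppc_init(). */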
void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
{
	struct acpi_processor *pr = per_cpu(processors, policy->cpu);

	if (pr)
		freq_qos_remove_request(&pr->perflib_req);
}

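/*
 * Evaluate _PCT and copy the two buffer elements of the returned package
 * into pr->performance->control_register and status_register.
 */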
static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *pct = NULL;
	union acpi_object obj = { 0 };

	status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PCT"));
		return -ENODEV;
	}

	pct = (union acpi_object *)buffer.pointer;
	if (!pct || (pct->type != ACPI_TYPE_PACKAGE)
	    || (pct->package.count != 2)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data\n");
		result = -EFAULT;
		goto end;
	}

	/*
	 * control_register
	 */

	obj = pct->package.elements[0];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_pct_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data (control_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->performance->control_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

	/*
	 * status_register
	 */

	obj = pct->package.elements[1];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_pct_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data (status_register)\n");
		result = -EFAULT;
		goto end;
	}

	memcpy(&pr->performance->status_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

end:
	kfree(buffer.pointer);

	return result;
}

#ifdef CONFIG_X86
/*
 * Some AMDs have 50MHz frequency multiples, but only provide 100MHz rounding
 * in their ACPI data. Calculate the real values and fix up the _PSS data.
 */
static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
{
	u32 hi, lo, fid, did;
	int index = px->control & 0x00000007;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return;

	if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
	    || boot_cpu_data.x86 == 0x11) {
		rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
		/*
		 * MSR C001_0064+:
		 * Bit 63: PstateEn. Read-write. If set, the P-state is valid.
		 */
		if (!(hi & BIT(31)))
			return;

		fid = lo & 0x3f;
		did = (lo >> 6) & 7;
		if (boot_cpu_data.x86 == 0x10)
			px->core_frequency = (100 * (fid + 0x10)) >> did;
		else
			px->core_frequency = (100 * (fid + 8)) >> did;
	}
}
#else
static void amd_fixup_frequency(struct acpi_processor_px *px, int i) {};
#endif

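/*
 * Evaluate _PSS, allocate pr->performance->states[] and extract one
 * acpi_processor_px entry per package element.  Entries whose frequency is
 * zero or would overflow a u32 when expressed in kHz are treated as invalid
 * and compacted out, shrinking state_count accordingly.
 */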
static int acpi_processor_get_performance_states(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *pss = NULL;
	int i;
	int last_invalid = -1;

	status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PSS"));
		return -ENODEV;
	}

	pss = buffer.pointer;
	if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _PSS data\n");
		result = -EFAULT;
		goto end;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n",
			  pss->package.count));

	pr->performance->state_count = pss->package.count;
	pr->performance->states =
	    kmalloc_array(pss->package.count,
			  sizeof(struct acpi_processor_px),
			  GFP_KERNEL);
	if (!pr->performance->states) {
		result = -ENOMEM;
		goto end;
	}

	for (i = 0; i < pr->performance->state_count; i++) {

		struct acpi_processor_px *px = &(pr->performance->states[i]);

		state.length = sizeof(struct acpi_processor_px);
		state.pointer = px;

		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

		status = acpi_extract_package(&(pss->package.elements[i]),
					      &format, &state);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status, "Invalid _PSS data"));
			result = -EFAULT;
			kfree(pr->performance->states);
			goto end;
		}

		amd_fixup_frequency(px, i);

		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
				  i,
				  (u32) px->core_frequency,
				  (u32) px->power,
				  (u32) px->transition_latency,
				  (u32) px->bus_master_latency,
				  (u32) px->control, (u32) px->status));

		/*
		 * Check that ACPI's u64 MHz will be valid as u32 KHz in cpufreq
		 */
		if (!px->core_frequency ||
		    ((u32)(px->core_frequency * 1000) !=
		     (px->core_frequency * 1000))) {
			printk(KERN_ERR FW_BUG PREFIX
			       "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n",
			       pr->id, px->core_frequency);
			if (last_invalid == -1)
				last_invalid = i;
		} else {
			if (last_invalid != -1) {
				/*
				 * Copy this valid entry over last_invalid entry
				 */
				memcpy(&(pr->performance->states[last_invalid]),
				       px, sizeof(struct acpi_processor_px));
				++last_invalid;
			}
		}
	}

	if (last_invalid == 0) {
		printk(KERN_ERR FW_BUG PREFIX
		       "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
		result = -EFAULT;
		kfree(pr->performance->states);
		pr->performance->states = NULL;
	}

	if (last_invalid > 0)
		pr->performance->state_count = last_invalid;

end:
	kfree(buffer.pointer);

	return result;
}

int acpi_processor_get_performance_info(struct acpi_processor *pr)
{
	int result = 0;

	if (!pr || !pr->performance || !pr->handle)
		return -EINVAL;

	if (!acpi_has_method(pr->handle, "_PCT")) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "ACPI-based processor performance control unavailable\n"));
		return -ENODEV;
	}

	result = acpi_processor_get_performance_control(pr);
	if (result)
		goto update_bios;

	result = acpi_processor_get_performance_states(pr);
	if (result)
		goto update_bios;

	/* We need to call _PPC once when cpufreq starts */
	if (ignore_ppc != 1)
		result = acpi_processor_get_platform_limit(pr);

	return result;

	/*
	 * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that
	 * the BIOS is older than the CPU and does not know its frequencies
	 */
update_bios:
#ifdef CONFIG_X86
	if (acpi_has_method(pr->handle, "_PPC")) {
		if (boot_cpu_has(X86_FEATURE_EST))
			printk(KERN_WARNING FW_BUG "BIOS needs update for CPU "
			       "frequency support\n");
	}
#endif
	return result;
}
EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info);

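/*
 * Tell the firmware, by writing the FADT pstate_control value to the FADT
 * SMI command port, that the OS is taking over performance state control.
 * Returns 1 on success, 0 if the FADT provides no such interface, or a
 * negative error code if the port write fails.
 */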
int acpi_processor_pstate_control(void)
{
	acpi_status status;

	if (!acpi_gbl_FADT.smi_command || !acpi_gbl_FADT.pstate_control)
		return 0;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Writing pstate_control [0x%x] to smi_command [0x%x]\n",
			  acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));

	status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
				    (u32)acpi_gbl_FADT.pstate_control, 8);
	if (ACPI_SUCCESS(status))
		return 1;

	ACPI_EXCEPTION((AE_INFO, status,
			"Failed to write pstate_control [0x%x] to smi_command [0x%x]",
			acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));
	return -EIO;
}

int acpi_processor_notify_smm(struct module *calling_module)
{
	static int is_done = 0;
	int result;

	if (!acpi_processor_cpufreq_init)
		return -EBUSY;

	if (!try_module_get(calling_module))
		return -EINVAL;

	/* is_done is set to negative if an error occurred,
	 * and to positive if _no_ error occurred, but SMM
	 * was already notified. This avoids double notification
	 * which might lead to unexpected results...
	 */
	if (is_done > 0) {
		module_put(calling_module);
		return 0;
	} else if (is_done < 0) {
		module_put(calling_module);
		return is_done;
	}

	is_done = -EIO;

	result = acpi_processor_pstate_control();
	if (!result) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_control\n"));
		module_put(calling_module);
		return 0;
	}
	if (result < 0) {
		module_put(calling_module);
		return result;
	}

	/* Success. If there's no _PPC, we need to fear nothing, so
	 * we can allow the cpufreq driver to be rmmod'ed. */
	is_done = 1;

	if (!acpi_processor_ppc_in_use)
		module_put(calling_module);

	return 0;
}

EXPORT_SYMBOL(acpi_processor_notify_smm);

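/*
 * Evaluate _PSD for @handle and extract its single package element into
 * @pdomain, rejecting anything that does not match the rev 0 layout or that
 * uses an unknown coordination type.
 */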
int acpi_processor_get_psd(acpi_handle handle, struct acpi_psd_package *pdomain)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;

	status = acpi_evaluate_object(handle, "_PSD", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		return -ENODEV;
	}

	psd = buffer.pointer;
	if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (psd->package.count != 1) {
		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		printk(KERN_ERR PREFIX "Unknown _PSD:num_entries\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		printk(KERN_ERR PREFIX "Unknown _PSD:revision\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		printk(KERN_ERR PREFIX "Invalid _PSD:coord_type\n");
		result = -EFAULT;
		goto end;
	}
end:
	kfree(buffer.pointer);
	return result;
}
EXPORT_SYMBOL(acpi_processor_get_psd);

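/*
 * Called by a cpufreq driver before registration: run _PSD on every possible
 * CPU and use the domain information to fill in shared_type and
 * shared_cpu_map for each per-CPU performance structure.  On any error while
 * parsing the domain info, coordination is assumed to cover only the CPU
 * itself, and pr->performance is cleared again in all cases.
 */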
int acpi_processor_preregister_performance(
		struct acpi_processor_performance __percpu *performance)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct acpi_processor *pr;
	struct acpi_psd_package *pdomain;
	struct acpi_processor *match_pr;
	struct acpi_psd_package *match_pdomain;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&performance_mutex);

	/*
	 * Check if another driver has already registered, and abort before
	 * changing pr->performance if it has. Check input data as well.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr) {
			/* Look only at processors in ACPI namespace */
			continue;
		}

		if (pr->performance) {
			retval = -EBUSY;
			goto err_out;
		}

		if (!performance || !per_cpu_ptr(performance, i)) {
			retval = -EINVAL;
			goto err_out;
		}
	}

	/* Call _PSD for all CPUs */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		pr->performance = per_cpu_ptr(performance, i);
		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
		pdomain = &(pr->performance->domain_info);
		if (acpi_processor_get_psd(pr->handle, pdomain)) {
			retval = -EINVAL;
			continue;
		}
	}
	if (retval)
		goto err_ret;

	/*
	 * Now that we have _PSD data from all CPUs, lets setup P-state
	 * domain info.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		pdomain = &(pr->performance->domain_info);
		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */

			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->performance->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->performance->shared_type =
					pr->performance->shared_type;
			cpumask_copy(match_pr->performance->shared_cpu_map,
				     pr->performance->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr || !pr->performance)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->performance->shared_cpu_map);
			cpumask_set_cpu(i, pr->performance->shared_cpu_map);
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
		pr->performance = NULL; /* Will be set for real in register */
	}

err_out:
	mutex_unlock(&performance_mutex);
	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL(acpi_processor_preregister_performance);

int
acpi_processor_register_performance(struct acpi_processor_performance
				    *performance, unsigned int cpu)
{
	struct acpi_processor *pr;

	if (!acpi_processor_cpufreq_init)
		return -EINVAL;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, cpu);
	if (!pr) {
		mutex_unlock(&performance_mutex);
		return -ENODEV;
	}

	if (pr->performance) {
		mutex_unlock(&performance_mutex);
		return -EBUSY;
	}

	WARN_ON(!performance);

	pr->performance = performance;

	if (acpi_processor_get_performance_info(pr)) {
		pr->performance = NULL;
		mutex_unlock(&performance_mutex);
		return -EIO;
	}

	mutex_unlock(&performance_mutex);
	return 0;
}

EXPORT_SYMBOL(acpi_processor_register_performance);

void acpi_processor_unregister_performance(unsigned int cpu)
{
	struct acpi_processor *pr;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, cpu);
	if (!pr) {
		mutex_unlock(&performance_mutex);
		return;
	}

	if (pr->performance)
		kfree(pr->performance->states);
	pr->performance = NULL;

	mutex_unlock(&performance_mutex);

	return;
}

EXPORT_SYMBOL(acpi_processor_unregister_performance);