2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* drivers/cpufreq/cpufreq_ondemand.c
|
|
|
|
*
|
|
|
|
* Copyright (C) 2001 Russell King
|
|
|
|
* (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
|
|
|
|
* Jun Nakajima <jun.nakajima@intel.com>
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*/
|
|
|
|
|
2012-10-25 22:47:42 +00:00
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/cpufreq.h>
|
2012-10-25 22:47:42 +00:00
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/kernel.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/kernel_stat.h>
|
2012-10-25 22:47:42 +00:00
|
|
|
#include <linux/kobject.h>
|
|
|
|
#include <linux/module.h>
|
2006-01-13 23:54:22 +00:00
|
|
|
#include <linux/mutex.h>
|
2012-10-25 22:47:42 +00:00
|
|
|
#include <linux/percpu-defs.h>
|
|
|
|
#include <linux/sysfs.h>
|
2008-08-04 18:59:12 +00:00
|
|
|
#include <linux/tick.h>
|
2012-10-25 22:47:42 +00:00
|
|
|
#include <linux/types.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2012-10-25 22:47:42 +00:00
|
|
|
#include "cpufreq_governor.h"
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2012-10-25 22:47:42 +00:00
|
|
|
/* On-demand governor macros */
/* Default %-points below up_threshold used when looking for a lower freq */
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
/* Default busy-% above which frequency is raised to max */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
/* Default/maximum multiplier applied to sampling_rate after hitting max freq */
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
/* Tighter defaults used when fine-grained (microsecond) idle stats exist */
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
/* Sysfs-enforced bounds for up_threshold (percent) */
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)
|
|
|
|
|
2012-10-25 22:47:42 +00:00
|
|
|
/* Shared governor bookkeeping (see cpufreq_governor.h) */
static struct dbs_data od_dbs_data;
/* Per-CPU ondemand state: freq table, hi/lo averaging, sampling work */
static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2012-10-25 22:47:42 +00:00
|
|
|
/*
 * Global tunables, exposed via sysfs.  powersave_bias is in units of 0.1%
 * (0..1000) of the target frequency; 0 disables the bias entirely.
 */
static struct od_dbs_tuners od_tuners = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
	.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
	.ignore_nice = 0,
	.powersave_bias = 0,
};
|
|
|
|
|
2012-10-25 22:47:42 +00:00
|
|
|
static void ondemand_powersave_bias_init_cpu(int cpu)
|
ondemand: Solve a big performance issue by counting IOWAIT time as busy
The ondemand cpufreq governor uses CPU busy time (e.g. not-idle
time) as a measure for scaling the CPU frequency up or down.
If the CPU is busy, the CPU frequency scales up, if it's idle,
the CPU frequency scales down. Effectively, it uses the CPU busy
time as proxy variable for the more nebulous "how critical is
performance right now" question.
This algorithm falls flat on its face in the light of workloads
where you're alternatingly disk and CPU bound, such as the ever
popular "git grep", but also things like startup of programs and
maildir using email clients... much to the chagarin of Andrew
Morton.
This patch changes the ondemand algorithm to count iowait time
as busy, not idle, time. As shown in the breakdown cases above,
iowait is performance critical often, and by counting iowait,
the proxy variable becomes a more accurate representation of the
"how critical is performance" question.
The problem and fix are both verified with the "perf timechar"
tool.
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dave Jones <davej@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20100509082606.3d9f00d0@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2010-05-09 15:26:06 +00:00
|
|
|
{
|
2012-10-25 22:47:42 +00:00
|
|
|
struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
|
ondemand: Solve a big performance issue by counting IOWAIT time as busy
The ondemand cpufreq governor uses CPU busy time (e.g. not-idle
time) as a measure for scaling the CPU frequency up or down.
If the CPU is busy, the CPU frequency scales up, if it's idle,
the CPU frequency scales down. Effectively, it uses the CPU busy
time as proxy variable for the more nebulous "how critical is
performance right now" question.
This algorithm falls flat on its face in the light of workloads
where you're alternatingly disk and CPU bound, such as the ever
popular "git grep", but also things like startup of programs and
maildir using email clients... much to the chagarin of Andrew
Morton.
This patch changes the ondemand algorithm to count iowait time
as busy, not idle, time. As shown in the breakdown cases above,
iowait is performance critical often, and by counting iowait,
the proxy variable becomes a more accurate representation of the
"how critical is performance" question.
The problem and fix are both verified with the "perf timechar"
tool.
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dave Jones <davej@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20100509082606.3d9f00d0@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2010-05-09 15:26:06 +00:00
|
|
|
|
2012-10-25 22:47:42 +00:00
|
|
|
dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
|
|
|
|
dbs_info->freq_lo = 0;
|
|
|
|
}
|
ondemand: Solve a big performance issue by counting IOWAIT time as busy
The ondemand cpufreq governor uses CPU busy time (e.g. not-idle
time) as a measure for scaling the CPU frequency up or down.
If the CPU is busy, the CPU frequency scales up, if it's idle,
the CPU frequency scales down. Effectively, it uses the CPU busy
time as proxy variable for the more nebulous "how critical is
performance right now" question.
This algorithm falls flat on its face in the light of workloads
where you're alternatingly disk and CPU bound, such as the ever
popular "git grep", but also things like startup of programs and
maildir using email clients... much to the chagarin of Andrew
Morton.
This patch changes the ondemand algorithm to count iowait time
as busy, not idle, time. As shown in the breakdown cases above,
iowait is performance critical often, and by counting iowait,
the proxy variable becomes a more accurate representation of the
"how critical is performance" question.
The problem and fix are both verified with the "perf timechar"
tool.
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dave Jones <davej@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20100509082606.3d9f00d0@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2010-05-09 15:26:06 +00:00
|
|
|
|
2012-10-25 22:47:42 +00:00
|
|
|
/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known (series) of CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			boot_cpu_data.x86 == 6 &&
			boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	/* Default: do not count iowait as busy time */
	return 0;
}
|
|
|
|
|
[CPUFREQ][2/2] ondemand: updated add powersave_bias tunable
ondemand selects the minimum frequency that can retire
a workload with negligible idle time -- ideally resulting in the highest
performance/power efficiency with negligible performance impact.
But on some systems and some workloads, this algorithm
is more performance biased than necessary, and
de-tuning it a bit to allow some performance impact
can save measurable power.
This patch adds a "powersave_bias" tunable to ondemand
to allow it to reduce its target frequency by a specified percent.
By default, the powersave_bias is 0 and has no effect.
powersave_bias is in units of 0.1%, so it has an effective range
of 1 through 1000, resulting in 0.1% to 100% impact.
In practice, users will not be able to detect a difference between
0.1% increments, but 1.0% increments turned out to be too large.
Also, the max value of 1000 (100%) would simply peg the system
in its deepest power saving P-state, unless the processor really has
a hardware P-state at 0Hz:-)
For example, If ondemand requests 2.0GHz based on utilization,
and powersave_bias=100, this code will knock 10% off the target
and seek a target of 1.8GHz instead of 2.0GHz until the
next sampling. If 1.8 is an exact match with an hardware frequency
we use it, otherwise we average our time between the frequency
next higher than 1.8 and next lower than 1.8.
Note that a user or administrative program can change powersave_bias
at run-time depending on how they expect the system to be used.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi at intel.com>
Signed-off-by: Alexey Starikovskiy <alexey.y.starikovskiy at intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>
2006-07-31 18:28:12 +00:00
|
|
|
/*
 * Find right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
 */
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
		unsigned int freq_next, unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						      policy->cpu);

	/* No frequency table: cannot average, hand back the request as-is */
	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	/* Snap the request to a real table entry, then knock off the bias
	 * (powersave_bias is in units of 0.1%, hence the /1000). */
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * od_tuners.powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		/* freq_avg is an exact table entry: no averaging needed */
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	/* Split the sampling period between freq_hi and freq_lo so the
	 * time-weighted average equals freq_avg (rounded to nearest). */
	jiffies_total = usecs_to_jiffies(od_tuners.sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}
|
|
|
|
|
|
|
|
static void ondemand_powersave_bias_init(void)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
for_each_online_cpu(i) {
|
2009-07-03 00:08:32 +00:00
|
|
|
ondemand_powersave_bias_init_cpu(i);
|
[CPUFREQ][2/2] ondemand: updated add powersave_bias tunable
ondemand selects the minimum frequency that can retire
a workload with negligible idle time -- ideally resulting in the highest
performance/power efficiency with negligible performance impact.
But on some systems and some workloads, this algorithm
is more performance biased than necessary, and
de-tuning it a bit to allow some performance impact
can save measurable power.
This patch adds a "powersave_bias" tunable to ondemand
to allow it to reduce its target frequency by a specified percent.
By default, the powersave_bias is 0 and has no effect.
powersave_bias is in units of 0.1%, so it has an effective range
of 1 through 1000, resulting in 0.1% to 100% impact.
In practice, users will not be able to detect a difference between
0.1% increments, but 1.0% increments turned out to be too large.
Also, the max value of 1000 (100%) would simply peg the system
in its deepest power saving P-state, unless the processor really has
a hardware P-state at 0Hz:-)
For example, If ondemand requests 2.0GHz based on utilization,
and powersave_bias=100, this code will knock 10% off the target
and seek a target of 1.8GHz instead of 2.0GHz until the
next sampling. If 1.8 is an exact match with an hardware frequency
we use it, otherwise we average our time between the frequency
next higher than 1.8 and next lower than 1.8.
Note that a user or administrative program can change powersave_bias
at run-time depending on how they expect the system to be used.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi at intel.com>
Signed-off-by: Alexey Starikovskiy <alexey.y.starikovskiy at intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>
2006-07-31 18:28:12 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-10-25 22:47:42 +00:00
|
|
|
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
|
|
|
|
{
|
|
|
|
if (od_tuners.powersave_bias)
|
|
|
|
freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
|
|
|
|
else if (p->cur == p->max)
|
|
|
|
return;
|
2009-07-24 13:25:06 +00:00
|
|
|
|
2012-10-25 22:47:42 +00:00
|
|
|
__cpufreq_driver_target(p, freq, od_tuners.powersave_bias ?
|
|
|
|
CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Every sampling_rate, we check, if current idle time is less than 20%
 * (default), then we try to increase frequency.  Every sampling_rate, we look
 * for the lowest frequency which can sustain the load while keeping idle time
 * over 30%. If such a frequency exist, we try to decrease to this frequency.
 *
 * Any frequency increase takes it to the maximum frequency. Frequency reduction
 * happens at minimum steps of 5% (default) of current frequency
 */
static void od_check_cpu(int cpu, unsigned int load_freq)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;

	/* Clear any leftover powersave-bias low-frequency phase */
	dbs_info->freq_lo = 0;

	/* Check for frequency increase */
	if (load_freq > od_tuners.up_threshold * policy->cur) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			dbs_info->rate_mult =
				od_tuners.sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
		return;
	}

	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/*
	 * The optimal frequency is the frequency that is the lowest that can
	 * support the current CPU usage without triggering the up policy. To be
	 * safe, we focus 10 points under the threshold.
	 */
	if (load_freq < (od_tuners.up_threshold - od_tuners.down_differential) *
			policy->cur) {
		unsigned int freq_next;
		freq_next = load_freq / (od_tuners.up_threshold -
				od_tuners.down_differential);

		/* No longer fully busy, reset rate_mult */
		dbs_info->rate_mult = 1;

		if (freq_next < policy->min)
			freq_next = policy->min;

		if (!od_tuners.powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		} else {
			/* Let the bias logic pick the hi/lo pair instead */
			int freq = powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			__cpufreq_driver_target(policy, freq,
					CPUFREQ_RELATION_L);
		}
	}
}
|
|
|
|
|
2012-10-25 22:47:42 +00:00
|
|
|
/*
 * Deferred-work handler: runs one sampling period.  A NORMAL sample
 * evaluates the load via dbs_check_cpu(); when powersave_bias scheduled a
 * low-frequency phase (freq_lo != 0), a follow-up SUB sample drops to
 * freq_lo for freq_lo_jiffies before the next normal sample.
 */
static void od_dbs_timer(struct work_struct *work)
{
	struct od_cpu_dbs_info_s *dbs_info =
		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
	unsigned int cpu = dbs_info->cdbs.cpu;
	int delay, sample_type = dbs_info->sample_type;

	mutex_lock(&dbs_info->cdbs.timer_mutex);

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = OD_NORMAL_SAMPLE;
	if (sample_type == OD_SUB_SAMPLE) {
		/* Second half of a biased period: run at the low frequency */
		delay = dbs_info->freq_lo_jiffies;
		__cpufreq_driver_target(dbs_info->cdbs.cur_policy,
			dbs_info->freq_lo, CPUFREQ_RELATION_H);
	} else {
		dbs_check_cpu(&od_dbs_data, cpu);
		if (dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			dbs_info->sample_type = OD_SUB_SAMPLE;
			delay = dbs_info->freq_hi_jiffies;
		} else {
			/* rate_mult stretches the period after a jump to max */
			delay = delay_for_sampling_rate(od_tuners.sampling_rate
						* dbs_info->rate_mult);
		}
	}

	schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay);
	mutex_unlock(&dbs_info->cdbs.timer_mutex);
}
|
|
|
|
|
|
|
|
/************************** sysfs interface ************************/
|
|
|
|
|
|
|
|
/* sysfs read: report the minimum permitted sampling rate (usecs). */
static ssize_t show_sampling_rate_min(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", od_dbs_data.min_sampling_rate);
}
|
|
|
|
|
2012-02-29 08:54:41 +00:00
|
|
|
/**
 * update_sampling_rate - update sampling rate effective immediately if needed.
 * @new_rate: new sampling rate
 *
 * If new rate is smaller than the old, simply updating
 * dbs_tuners_int.sampling_rate might not be appropriate. For example, if the
 * original sampling_rate was 1 second and the requested new sampling rate is 10
 * ms because the user needs immediate reaction from ondemand governor, but not
 * sure if higher frequency will be required or not, then, the governor may
 * change the sampling rate too late; up to 1 second later. Thus, if we are
 * reducing the sampling rate, we need to make the new value effective
 * immediately.
 */
static void update_sampling_rate(unsigned int new_rate)
{
	int cpu;

	/* Clamp to the governor's minimum before publishing */
	od_tuners.sampling_rate = new_rate = max(new_rate,
			od_dbs_data.min_sampling_rate);

	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct od_cpu_dbs_info_s *dbs_info;
		unsigned long next_sampling, appointed_at;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
		cpufreq_cpu_put(policy);

		mutex_lock(&dbs_info->cdbs.timer_mutex);

		/* Governor not currently sampling this CPU: nothing to move */
		if (!delayed_work_pending(&dbs_info->cdbs.work)) {
			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			continue;
		}

		next_sampling = jiffies + usecs_to_jiffies(new_rate);
		appointed_at = dbs_info->cdbs.work.timer.expires;

		/* Only reschedule when the new period would fire sooner */
		if (time_before(next_sampling, appointed_at)) {

			/* Drop the mutex across the sync cancel: the work
			 * item itself takes timer_mutex (see od_dbs_timer),
			 * so holding it here would deadlock. */
			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			cancel_delayed_work_sync(&dbs_info->cdbs.work);
			mutex_lock(&dbs_info->cdbs.timer_mutex);

			schedule_delayed_work_on(dbs_info->cdbs.cpu,
					&dbs_info->cdbs.work,
					usecs_to_jiffies(new_rate));

		}
		mutex_unlock(&dbs_info->cdbs.timer_mutex);
	}
}
|
|
|
|
|
2009-07-24 13:25:06 +00:00
|
|
|
/* sysfs write: parse a new sampling rate (usecs) and apply it at once. */
static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int rate;

	if (sscanf(buf, "%u", &rate) != 1)
		return -EINVAL;

	update_sampling_rate(rate);
	return count;
}
|
|
|
|
|
2010-05-09 15:26:51 +00:00
|
|
|
/* sysfs write: treat iowait as busy time? Any non-zero value enables it. */
static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u", &val) != 1)
		return -EINVAL;

	od_tuners.io_is_busy = !!val;
	return count;
}
|
|
|
|
|
2009-07-24 13:25:06 +00:00
|
|
|
static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
|
|
|
|
const char *buf, size_t count)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
unsigned int input;
|
|
|
|
int ret;
|
2006-06-28 20:52:18 +00:00
|
|
|
ret = sscanf(buf, "%u", &input);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-02-28 05:43:23 +00:00
|
|
|
if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
|
2005-06-01 02:03:50 +00:00
|
|
|
input < MIN_FREQUENCY_UP_THRESHOLD) {
|
2005-04-16 22:20:36 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
2012-10-25 22:47:42 +00:00
|
|
|
od_tuners.up_threshold = input;
|
2005-04-16 22:20:36 +00:00
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
2010-10-06 20:54:24 +00:00
|
|
|
static ssize_t store_sampling_down_factor(struct kobject *a,
|
|
|
|
struct attribute *b, const char *buf, size_t count)
|
|
|
|
{
|
|
|
|
unsigned int input, j;
|
|
|
|
int ret;
|
|
|
|
ret = sscanf(buf, "%u", &input);
|
|
|
|
|
|
|
|
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
|
|
|
|
return -EINVAL;
|
2012-10-25 22:47:42 +00:00
|
|
|
od_tuners.sampling_down_factor = input;
|
2010-10-06 20:54:24 +00:00
|
|
|
|
|
|
|
/* Reset down sampling multiplier in case it was active */
|
|
|
|
for_each_online_cpu(j) {
|
2012-10-25 22:47:42 +00:00
|
|
|
struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
|
|
|
|
j);
|
2010-10-06 20:54:24 +00:00
|
|
|
dbs_info->rate_mult = 1;
|
|
|
|
}
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
2009-07-24 13:25:06 +00:00
|
|
|
static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
|
|
|
|
const char *buf, size_t count)
|
2005-06-01 02:03:47 +00:00
|
|
|
{
|
|
|
|
unsigned int input;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
unsigned int j;
|
2006-02-28 05:43:23 +00:00
|
|
|
|
2006-06-28 20:52:18 +00:00
|
|
|
ret = sscanf(buf, "%u", &input);
|
2009-01-18 06:43:44 +00:00
|
|
|
if (ret != 1)
|
2005-06-01 02:03:47 +00:00
|
|
|
return -EINVAL;
|
|
|
|
|
2009-01-18 06:43:44 +00:00
|
|
|
if (input > 1)
|
2005-06-01 02:03:47 +00:00
|
|
|
input = 1;
|
2006-02-28 05:43:23 +00:00
|
|
|
|
2012-10-25 22:47:42 +00:00
|
|
|
if (input == od_tuners.ignore_nice) { /* nothing to do */
|
2005-06-01 02:03:47 +00:00
|
|
|
return count;
|
|
|
|
}
|
2012-10-25 22:47:42 +00:00
|
|
|
od_tuners.ignore_nice = input;
|
2005-06-01 02:03:47 +00:00
|
|
|
|
2006-06-28 20:49:52 +00:00
|
|
|
/* we need to re-evaluate prev_cpu_idle */
|
2005-06-01 02:03:49 +00:00
|
|
|
for_each_online_cpu(j) {
|
2012-10-25 22:47:42 +00:00
|
|
|
struct od_cpu_dbs_info_s *dbs_info;
|
2009-06-24 06:13:48 +00:00
|
|
|
dbs_info = &per_cpu(od_cpu_dbs_info, j);
|
2012-10-25 22:47:42 +00:00
|
|
|
dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
|
|
|
|
&dbs_info->cdbs.prev_cpu_wall);
|
|
|
|
if (od_tuners.ignore_nice)
|
|
|
|
dbs_info->cdbs.prev_cpu_nice =
|
|
|
|
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
|
2009-01-23 14:25:02 +00:00
|
|
|
|
2005-06-01 02:03:47 +00:00
|
|
|
}
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
2009-07-24 13:25:06 +00:00
|
|
|
static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
|
|
|
|
const char *buf, size_t count)
|
[CPUFREQ][2/2] ondemand: updated add powersave_bias tunable
ondemand selects the minimum frequency that can retire
a workload with negligible idle time -- ideally resulting in the highest
performance/power efficiency with negligible performance impact.
But on some systems and some workloads, this algorithm
is more performance biased than necessary, and
de-tuning it a bit to allow some performance impact
can save measurable power.
This patch adds a "powersave_bias" tunable to ondemand
to allow it to reduce its target frequency by a specified percent.
By default, the powersave_bias is 0 and has no effect.
powersave_bias is in units of 0.1%, so it has an effective range
of 1 through 1000, resulting in 0.1% to 100% impact.
In practice, users will not be able to detect a difference between
0.1% increments, but 1.0% increments turned out to be too large.
Also, the max value of 1000 (100%) would simply peg the system
in its deepest power saving P-state, unless the processor really has
a hardware P-state at 0Hz:-)
For example, If ondemand requests 2.0GHz based on utilization,
and powersave_bias=100, this code will knock 10% off the target
and seek a target of 1.8GHz instead of 2.0GHz until the
next sampling. If 1.8 is an exact match with an hardware frequency
we use it, otherwise we average our time between the frequency
next higher than 1.8 and next lower than 1.8.
Note that a user or administrative program can change powersave_bias
at run-time depending on how they expect the system to be used.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi at intel.com>
Signed-off-by: Alexey Starikovskiy <alexey.y.starikovskiy at intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>
2006-07-31 18:28:12 +00:00
|
|
|
{
|
|
|
|
unsigned int input;
|
|
|
|
int ret;
|
|
|
|
ret = sscanf(buf, "%u", &input);
|
|
|
|
|
|
|
|
if (ret != 1)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (input > 1000)
|
|
|
|
input = 1000;
|
|
|
|
|
2012-10-25 22:47:42 +00:00
|
|
|
od_tuners.powersave_bias = input;
|
[CPUFREQ][2/2] ondemand: updated add powersave_bias tunable
ondemand selects the minimum frequency that can retire
a workload with negligible idle time -- ideally resulting in the highest
performance/power efficiency with negligible performance impact.
But on some systems and some workloads, this algorithm
is more performance biased than necessary, and
de-tuning it a bit to allow some performance impact
can save measurable power.
This patch adds a "powersave_bias" tunable to ondemand
to allow it to reduce its target frequency by a specified percent.
By default, the powersave_bias is 0 and has no effect.
powersave_bias is in units of 0.1%, so it has an effective range
of 1 through 1000, resulting in 0.1% to 100% impact.
In practice, users will not be able to detect a difference between
0.1% increments, but 1.0% increments turned out to be too large.
Also, the max value of 1000 (100%) would simply peg the system
in its deepest power saving P-state, unless the processor really has
a hardware P-state at 0Hz:-)
For example, If ondemand requests 2.0GHz based on utilization,
and powersave_bias=100, this code will knock 10% off the target
and seek a target of 1.8GHz instead of 2.0GHz until the
next sampling. If 1.8 is an exact match with an hardware frequency
we use it, otherwise we average our time between the frequency
next higher than 1.8 and next lower than 1.8.
Note that a user or administrative program can change powersave_bias
at run-time depending on how they expect the system to be used.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi at intel.com>
Signed-off-by: Alexey Starikovskiy <alexey.y.starikovskiy at intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>
2006-07-31 18:28:12 +00:00
|
|
|
ondemand_powersave_bias_init();
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
2012-10-25 22:47:42 +00:00
|
|
|
/* Per-tunable sysfs "show" handlers, generated from od_tuners fields. */
show_one(od, sampling_rate, sampling_rate);
show_one(od, io_is_busy, io_is_busy);
show_one(od, up_threshold, up_threshold);
show_one(od, sampling_down_factor, sampling_down_factor);
show_one(od, ignore_nice_load, ignore_nice);
show_one(od, powersave_bias, powersave_bias);

/*
 * Global attribute objects pairing the generated show_* handlers with the
 * store_* handlers above; sampling_rate_min is read-only.
 */
define_one_global_rw(sampling_rate);
define_one_global_rw(io_is_busy);
define_one_global_rw(up_threshold);
define_one_global_rw(sampling_down_factor);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);
define_one_global_ro(sampling_rate_min);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2009-01-18 06:43:44 +00:00
|
|
|
/*
 * NULL-terminated attribute table exported through od_attr_group; these
 * are the tunables visible under the governor's sysfs directory.
 */
static struct attribute *dbs_attributes[] = {
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&sampling_down_factor.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	&io_is_busy.attr,
	NULL
};
|
|
|
|
|
2012-10-25 22:47:42 +00:00
|
|
|
/* sysfs group ("ondemand" subdirectory) bundling the tunables above. */
static struct attribute_group od_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};
|
|
|
|
|
|
|
|
/************************** sysfs end ************************/
|
|
|
|
|
2012-10-25 22:47:42 +00:00
|
|
|
/*
 * Generates the per-CPU accessor routines (get_cpu_cdbs /
 * get_cpu_dbs_info_s, referenced from od_dbs_data below) for the
 * ondemand per-CPU data — see cpufreq_governor.h for the macro body.
 */
define_get_cpu_dbs_routines(od_cpu_dbs_info);
|
ondemand: Solve a big performance issue by counting IOWAIT time as busy
The ondemand cpufreq governor uses CPU busy time (e.g. not-idle
time) as a measure for scaling the CPU frequency up or down.
If the CPU is busy, the CPU frequency scales up, if it's idle,
the CPU frequency scales down. Effectively, it uses the CPU busy
time as proxy variable for the more nebulous "how critical is
performance right now" question.
This algorithm falls flat on its face in the light of workloads
where you're alternatingly disk and CPU bound, such as the ever
popular "git grep", but also things like startup of programs and
maildir using email clients... much to the chagarin of Andrew
Morton.
This patch changes the ondemand algorithm to count iowait time
as busy, not idle, time. As shown in the breakdown cases above,
iowait is performance critical often, and by counting iowait,
the proxy variable becomes a more accurate representation of the
"how critical is performance" question.
The problem and fix are both verified with the "perf timechar"
tool.
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dave Jones <davej@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20100509082606.3d9f00d0@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2010-05-09 15:26:06 +00:00
|
|
|
|
2012-10-25 22:47:42 +00:00
|
|
|
/* Ondemand-specific operations consumed by the common governor code. */
static struct od_ops od_ops = {
	.io_busy = should_io_be_busy,
	.powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
	.powersave_bias_target = powersave_bias_target,
	.freq_increase = dbs_freq_increase,
};
|
2006-06-28 20:51:19 +00:00
|
|
|
|
2012-10-25 22:47:42 +00:00
|
|
|
/*
 * Glue between the common dbs governor framework (cpufreq_governor.h)
 * and the ondemand-specific pieces: tunables, per-CPU data accessors,
 * the timer body and the load-evaluation callback.
 */
static struct dbs_data od_dbs_data = {
	.governor = GOV_ONDEMAND,
	.attr_group = &od_attr_group,
	.tuners = &od_tuners,
	.get_cpu_cdbs = get_cpu_cdbs,
	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
	.gov_dbs_timer = od_dbs_timer,
	.gov_check_cpu = od_check_cpu,
	.gov_ops = &od_ops,
};
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2012-10-25 22:47:42 +00:00
|
|
|
/*
 * Governor entry point: forward every cpufreq governor @event for
 * @policy to the shared dbs handler, bound to the ondemand data.
 */
static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
				   unsigned int event)
{
	return cpufreq_governor_dbs(&od_dbs_data, policy, event);
}
|
|
|
|
|
2012-10-25 22:47:42 +00:00
|
|
|
/*
 * The governor object registered with the cpufreq core.  It is given
 * external linkage only when ondemand is the default governor —
 * presumably so the cpufreq core can reference it directly; confirm
 * against the CPU_FREQ_DEFAULT_GOV_ONDEMAND users.
 */
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
	.name = "ondemand",
	.governor = od_cpufreq_governor_dbs,
	.max_transition_latency = TRANSITION_LATENCY_LIMIT,
	.owner = THIS_MODULE,
};
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
 * Module/boot init: pick thresholds appropriate for the kernel's idle
 * accounting granularity, then register the governor.
 *
 * get_cpu_idle_time_us() returning -1 means per-CPU idle
 * micro-accounting is unavailable, so the coarser tick-based defaults
 * and a HZ-derived minimum sampling rate are kept.
 */
static int __init cpufreq_gov_dbs_init(void)
{
	u64 idle_time;
	int cpu;

	mutex_init(&od_dbs_data.mutex);

	/* Probe idle micro-accounting on the current CPU (preemption off). */
	cpu = get_cpu();
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();

	if (idle_time == -1ULL) {
		/* For correct statistics, we need 10 ticks for each measure */
		od_dbs_data.min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
			jiffies_to_usecs(10);
	} else {
		/* Idle micro accounting is supported. Use finer thresholds */
		od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		od_tuners.down_differential = MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
		/*
		 * In nohz/micro accounting case we set the minimum frequency
		 * not depending on HZ, but fixed (very low). The deferred
		 * timer might skip some samples if idle/sleeping as needed.
		 */
		od_dbs_data.min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	}

	return cpufreq_register_governor(&cpufreq_gov_ondemand);
}
|
|
|
|
|
|
|
|
/* Module unload: drop the governor registration. */
static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
}
|
|
|
|
|
2006-06-28 20:52:18 +00:00
|
|
|
MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

/*
 * When ondemand is the default governor, register it via fs_initcall so
 * it is available early in boot; otherwise use plain module init.
 */
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);
|