cpupower: mperf_monitor: Introduce per_cpu_schedule flag
The per_cpu_schedule flag is used to move the cpupower process to the
cpu on which we are looking to read the APERF/MPERF registers. This
prevents IPIs from being generated by read_msr()s as we are already on
the cpu of interest.

For example, if cpupower is running on CPU 0 and we execute

  read_msr(20, MSR_APERF, val)
  read_msr(20, MSR_MPERF, val)

the msr module will generate an IPI from CPU 0 to CPU 20 to query
MSR_APERF and then MSR_MPERF in separate IPIs. The delay between the
APERF and MPERF reads introduced by this IPI latency may cause the two
registers to go out of sync. Using the per_cpu_schedule flag reduces
the probability of this happening. It comes at the cost of a negligible
increase in cpu consumption caused by the migration of cpupower across
each of the cpus of the system.

Signed-off-by: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
Acked-by: Thomas Renninger <trenn@suse.de>
Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
commit 7adafe541f
parent d3f5d2a192
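The diff below depends on a bind_cpu() helper to migrate the process to
the target cpu before the MSR reads. For illustration only, a minimal
sketch of what such a helper can look like, assuming it is a thin
wrapper around sched_setaffinity(2); this is a hypothetical stand-in,
not necessarily the exact cpupower implementation:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

/*
 * Illustrative sketch: pin the calling process to a single cpu so that
 * subsequent MSR reads happen locally instead of via cross-cpu IPIs.
 * Assumes bind_cpu() simply wraps sched_setaffinity().
 */
static int bind_cpu(int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	/* pid 0 means the calling process */
	if (sched_setaffinity(0, sizeof(set), &set)) {
		perror("sched_setaffinity");
		return 1;	/* non-zero on failure, matching the caller's check */
	}
	return 0;
}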
--- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
+++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
@@ -62,6 +62,7 @@ struct cpuidle_monitor {
 	unsigned int overflow_s;
 	struct {
 		unsigned int needs_root:1;
+		unsigned int per_cpu_schedule:1;
 	} flags;
 };
 
--- a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
+++ b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
@@ -86,15 +86,35 @@ static int mperf_get_tsc(unsigned long long *tsc)
 	return ret;
 }
 
-static int mperf_init_stats(unsigned int cpu)
+static int get_aperf_mperf(int cpu, unsigned long long *aval,
+			   unsigned long long *mval)
 {
-	unsigned long long val;
 	int ret;
 
-	ret = read_msr(cpu, MSR_APERF, &val);
-	aperf_previous_count[cpu] = val;
-	ret |= read_msr(cpu, MSR_MPERF, &val);
-	mperf_previous_count[cpu] = val;
+	/*
+	 * Running on the cpu from which we read the registers will
+	 * prevent APERF/MPERF from going out of sync because of IPI
+	 * latency introduced by read_msr()s.
+	 */
+	if (mperf_monitor.flags.per_cpu_schedule) {
+		if (bind_cpu(cpu))
+			return 1;
+	}
+
+	ret = read_msr(cpu, MSR_APERF, aval);
+	ret |= read_msr(cpu, MSR_MPERF, mval);
+
+	return ret;
+}
+
+static int mperf_init_stats(unsigned int cpu)
+{
+	unsigned long long aval, mval;
+	int ret;
+
+	ret = get_aperf_mperf(cpu, &aval, &mval);
+	aperf_previous_count[cpu] = aval;
+	mperf_previous_count[cpu] = mval;
 	is_valid[cpu] = !ret;
 
 	return 0;
@@ -102,13 +122,12 @@ static int mperf_init_stats(unsigned int cpu)
 
 static int mperf_measure_stats(unsigned int cpu)
 {
-	unsigned long long val;
+	unsigned long long aval, mval;
 	int ret;
 
-	ret = read_msr(cpu, MSR_APERF, &val);
-	aperf_current_count[cpu] = val;
-	ret |= read_msr(cpu, MSR_MPERF, &val);
-	mperf_current_count[cpu] = val;
+	ret = get_aperf_mperf(cpu, &aval, &mval);
+	aperf_current_count[cpu] = aval;
+	mperf_current_count[cpu] = mval;
 	is_valid[cpu] = !ret;
 
 	return 0;
@@ -305,6 +324,9 @@ struct cpuidle_monitor *mperf_register(void)
 	if (init_maxfreq_mode())
 		return NULL;
 
+	if (cpupower_cpu_info.vendor == X86_VENDOR_AMD)
+		mperf_monitor.flags.per_cpu_schedule = 1;
+
 	/* Free this at program termination */
 	is_valid = calloc(cpu_count, sizeof(int));
 	mperf_previous_count = calloc(cpu_count, sizeof(unsigned long long));
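For reference, the read_msr() calls above go through the msr kernel
module's per-cpu character device; when the caller is not running on the
target cpu, the rdmsr is executed on that cpu via an IPI, which is the
latency the patch avoids. A minimal sketch of such a helper, assuming
the standard /dev/cpu/<N>/msr interface and the architectural offsets
IA32_MPERF (0xe7) and IA32_APERF (0xe8); names and error handling are
illustrative, not the exact cpupower code:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_MPERF 0xe7	/* IA32_MPERF, counts at max (P0) frequency */
#define MSR_APERF 0xe8	/* IA32_APERF, counts at actual frequency */

/*
 * Illustrative sketch of a read_msr()-style helper: pread() at the MSR
 * address on /dev/cpu/<cpu>/msr. If the process is not pinned to <cpu>,
 * the msr module performs the rdmsr on that cpu via an IPI.
 */
static int read_msr(int cpu, unsigned int idx, unsigned long long *val)
{
	char path[64];
	int fd;

	snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	if (pread(fd, val, sizeof(*val), idx) != sizeof(*val)) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

With bind_cpu() called first, both reads execute locally back to back,
so APERF and MPERF are sampled close together in time.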