2005-04-16 22:20:36 +00:00
|
|
|
/**
|
|
|
|
* @file nmi_int.c
|
|
|
|
*
|
2009-07-08 11:49:38 +00:00
|
|
|
* @remark Copyright 2002-2009 OProfile authors
|
2005-04-16 22:20:36 +00:00
|
|
|
* @remark Read the file COPYING
|
|
|
|
*
|
|
|
|
* @author John Levon <levon@movementarian.org>
|
2008-07-22 19:08:48 +00:00
|
|
|
* @author Robert Richter <robert.richter@amd.com>
|
2009-07-08 11:49:38 +00:00
|
|
|
* @author Barry Kasindorf <barry.kasindorf@amd.com>
|
|
|
|
* @author Jason Yeh <jason.yeh@amd.com>
|
|
|
|
* @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/notifier.h>
|
|
|
|
#include <linux/smp.h>
|
|
|
|
#include <linux/oprofile.h>
|
2011-03-23 21:15:54 +00:00
|
|
|
#include <linux/syscore_ops.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/slab.h>
|
2006-07-10 15:06:21 +00:00
|
|
|
#include <linux/moduleparam.h>
|
2007-05-08 07:27:03 +00:00
|
|
|
#include <linux/kdebug.h>
|
2008-08-19 01:13:38 +00:00
|
|
|
#include <linux/cpu.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <asm/nmi.h>
|
|
|
|
#include <asm/msr.h>
|
|
|
|
#include <asm/apic.h>
|
2008-01-30 12:32:33 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
#include "op_counter.h"
|
|
|
|
#include "op_x86_model.h"
|
2006-09-26 08:52:27 +00:00
|
|
|
|
2009-07-09 13:12:35 +00:00
|
|
|
/* Vendor/family-specific model, chosen once at init by op_nmi_init(). */
static struct op_x86_model_spec *model;
/* Per-CPU shadow copies of the counter/control (and multiplex) MSRs. */
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
/* APIC LVTPC entry saved at setup, restored at shutdown. */
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);

/* must be protected with get_online_cpus()/put_online_cpus(): */
static int nmi_enabled;		/* MSRs programmed, NMI handler active */
static int ctr_running;		/* counters currently counting */

/* One slot per virtual counter, filled in via oprofilefs (see nmi_create_files). */
struct op_counter_config counter_config[OP_MAX_COUNTER];
|
|
|
|
|
2009-05-25 13:10:32 +00:00
|
|
|
/* common functions */
|
|
|
|
|
|
|
|
u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
|
|
|
|
struct op_counter_config *counter_config)
|
|
|
|
{
|
|
|
|
u64 val = 0;
|
|
|
|
u16 event = (u16)counter_config->event;
|
|
|
|
|
|
|
|
val |= ARCH_PERFMON_EVENTSEL_INT;
|
|
|
|
val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
|
|
|
|
val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
|
|
|
|
val |= (counter_config->unit_mask & 0xFF) << 8;
|
2011-03-16 19:44:33 +00:00
|
|
|
counter_config->extra &= (ARCH_PERFMON_EVENTSEL_INV |
|
|
|
|
ARCH_PERFMON_EVENTSEL_EDGE |
|
|
|
|
ARCH_PERFMON_EVENTSEL_CMASK);
|
|
|
|
val |= counter_config->extra;
|
2009-05-25 13:10:32 +00:00
|
|
|
event &= model->event_mask ? model->event_mask : 0xFF;
|
|
|
|
val |= event & 0xFF;
|
2012-10-10 07:18:35 +00:00
|
|
|
val |= (u64)(event & 0x0F00) << 24;
|
2009-05-25 13:10:32 +00:00
|
|
|
|
|
|
|
return val;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-09-30 19:06:21 +00:00
|
|
|
/*
 * Local-NMI callback: decide whether this NMI belongs to oprofile.
 *
 * If the counters are running, let the model inspect them and collect
 * samples.  If oprofile is fully disabled, the NMI is not ours
 * (NMI_DONE).  Otherwise (enabled but counters stopped, e.g. during a
 * multiplex switch) stop the hardware again and swallow the NMI.
 * Reads of ctr_running/nmi_enabled here pair with the smp_mb() calls
 * in nmi_start()/nmi_setup().
 */
static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs)
{
	if (ctr_running)
		model->check_ctrs(regs, this_cpu_ptr(&cpu_msrs));
	else if (!nmi_enabled)
		return NMI_DONE;
	else
		model->stop(this_cpu_ptr(&cpu_msrs));
	return NMI_HANDLED;
}
|
2006-09-26 08:52:27 +00:00
|
|
|
|
2008-01-30 12:32:33 +00:00
|
|
|
static void nmi_cpu_save_registers(struct op_msrs *msrs)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2008-01-30 12:32:33 +00:00
|
|
|
struct op_msr *counters = msrs->counters;
|
|
|
|
struct op_msr *controls = msrs->controls;
|
2005-04-16 22:20:36 +00:00
|
|
|
unsigned int i;
|
|
|
|
|
2009-06-05 13:54:24 +00:00
|
|
|
for (i = 0; i < model->num_counters; ++i) {
|
2009-06-03 17:09:27 +00:00
|
|
|
if (counters[i].addr)
|
|
|
|
rdmsrl(counters[i].addr, counters[i].saved);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2008-01-30 12:32:33 +00:00
|
|
|
|
2009-06-05 13:54:24 +00:00
|
|
|
for (i = 0; i < model->num_controls; ++i) {
|
2009-06-03 17:09:27 +00:00
|
|
|
if (controls[i].addr)
|
|
|
|
rdmsrl(controls[i].addr, controls[i].saved);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-07-09 12:38:49 +00:00
|
|
|
/*
 * Start the counters on the current CPU (runs via on_each_cpu or IPI).
 * A NULL controls array means the MSRs were never allocated/filled in
 * for this CPU — warn once rather than dereference garbage.
 */
static void nmi_cpu_start(void *dummy)
{
	struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs);
	if (!msrs->controls)
		WARN_ON_ONCE(1);
	else
		model->start(msrs);
}
|
|
|
|
|
|
|
|
/*
 * oprofile "start" hook: mark the counters running, then start them on
 * every online CPU.  The hotplug lock keeps ctr_running consistent with
 * the set of CPUs actually started (see oprofile_cpu_notifier).
 */
static int nmi_start(void)
{
	get_online_cpus();
	ctr_running = 1;
	/* make ctr_running visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_start, NULL, 1);
	put_online_cpus();
	return 0;
}
|
|
|
|
|
|
|
|
static void nmi_cpu_stop(void *dummy)
|
|
|
|
{
|
x86: Replace __get_cpu_var uses
__get_cpu_var() is used for multiple purposes in the kernel source. One of
them is address calculation via the form &__get_cpu_var(x). This calculates
the address for the instance of the percpu variable of the current processor
based on an offset.
Other use cases are for storing and retrieving data from the current
processors percpu area. __get_cpu_var() can be used as an lvalue when
writing data or on the right side of an assignment.
__get_cpu_var() is defined as :
#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
__get_cpu_var() always only does an address determination. However, store
and retrieve operations could use a segment prefix (or global register on
other platforms) to avoid the address calculation.
this_cpu_write() and this_cpu_read() can directly take an offset into a
percpu area and use optimized assembly code to read and write per cpu
variables.
This patch converts __get_cpu_var into either an explicit address
calculation using this_cpu_ptr() or into a use of this_cpu operations that
use the offset. Thereby address calculations are avoided and less registers
are used when code is generated.
Transformations done to __get_cpu_var()
1. Determine the address of the percpu instance of the current processor.
DEFINE_PER_CPU(int, y);
int *x = &__get_cpu_var(y);
Converts to
int *x = this_cpu_ptr(&y);
2. Same as #1 but this time an array structure is involved.
DEFINE_PER_CPU(int, y[20]);
int *x = __get_cpu_var(y);
Converts to
int *x = this_cpu_ptr(y);
3. Retrieve the content of the current processors instance of a per cpu
variable.
DEFINE_PER_CPU(int, y);
int x = __get_cpu_var(y)
Converts to
int x = __this_cpu_read(y);
4. Retrieve the content of a percpu struct
DEFINE_PER_CPU(struct mystruct, y);
struct mystruct x = __get_cpu_var(y);
Converts to
memcpy(&x, this_cpu_ptr(&y), sizeof(x));
5. Assignment to a per cpu variable
DEFINE_PER_CPU(int, y)
__get_cpu_var(y) = x;
Converts to
__this_cpu_write(y, x);
6. Increment/Decrement etc of a per cpu variable
DEFINE_PER_CPU(int, y);
__get_cpu_var(y)++
Converts to
__this_cpu_inc(y)
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: x86@kernel.org
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
2014-08-17 17:30:40 +00:00
|
|
|
struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs);
|
2010-05-03 17:44:32 +00:00
|
|
|
if (!msrs->controls)
|
|
|
|
WARN_ON_ONCE(1);
|
|
|
|
else
|
|
|
|
model->stop(msrs);
|
2009-07-09 12:38:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * oprofile "stop" hook: stop the counters on every online CPU, then
 * clear ctr_running under the hotplug lock so newly-onlined CPUs do
 * not start counting (see nmi_cpu_up()).
 */
static void nmi_stop(void)
{
	get_online_cpus();
	on_each_cpu(nmi_cpu_stop, NULL, 1);
	ctr_running = 0;
	put_online_cpus();
}
|
|
|
|
|
2009-07-16 11:04:43 +00:00
|
|
|
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

/* First virtual counter of the set currently mapped onto the hardware
 * counters on this CPU; advanced by nmi_cpu_switch(). */
static DEFINE_PER_CPU(int, switch_index);

/* Multiplexing is available iff the model provides a switch_ctrl op. */
static inline int has_mux(void)
{
	return !!model->switch_ctrl;
}
|
|
|
|
|
2009-07-16 11:04:43 +00:00
|
|
|
/* Map a physical counter index to the virtual counter it currently
 * represents on this CPU (offset by the active multiplex set). */
inline int op_x86_phys_to_virt(int phys)
{
	return __this_cpu_read(switch_index) + phys;
}
|
|
|
|
|
2009-07-10 13:47:17 +00:00
|
|
|
/* Map a virtual counter index back to the physical counter that will
 * carry it; virtual counters cycle over the hardware counters. */
inline int op_x86_virt_to_phys(int virt)
{
	return virt % model->num_counters;
}
|
|
|
|
|
2009-07-09 12:40:04 +00:00
|
|
|
static void nmi_shutdown_mux(void)
|
|
|
|
{
|
|
|
|
int i;
|
2009-07-09 13:11:45 +00:00
|
|
|
|
|
|
|
if (!has_mux())
|
|
|
|
return;
|
|
|
|
|
2009-07-09 12:40:04 +00:00
|
|
|
for_each_possible_cpu(i) {
|
|
|
|
kfree(per_cpu(cpu_msrs, i).multiplex);
|
|
|
|
per_cpu(cpu_msrs, i).multiplex = NULL;
|
|
|
|
per_cpu(switch_index, i) = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Allocate the per-CPU multiplex MSR shadow arrays (one slot per
 * virtual counter).  Returns 1 on success, 0 on allocation failure;
 * on failure the caller is expected to clean up via free_msrs() ->
 * nmi_shutdown_mux().  Trivially succeeds when multiplexing is off.
 */
static int nmi_setup_mux(void)
{
	size_t multiplex_size =
		sizeof(struct op_msr) * model->num_virt_counters;
	int i;

	if (!has_mux())
		return 1;

	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).multiplex =
			kzalloc(multiplex_size, GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).multiplex)
			return 0;
	}

	return 1;
}
|
|
|
|
|
2009-07-09 12:38:49 +00:00
|
|
|
static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
struct op_msr *multiplex = msrs->multiplex;
|
|
|
|
|
2009-07-09 13:11:45 +00:00
|
|
|
if (!has_mux())
|
|
|
|
return;
|
|
|
|
|
2009-07-09 12:38:49 +00:00
|
|
|
for (i = 0; i < model->num_virt_counters; ++i) {
|
|
|
|
if (counter_config[i].enabled) {
|
|
|
|
multiplex[i].saved = -(u64)counter_config[i].count;
|
|
|
|
} else {
|
|
|
|
multiplex[i].saved = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
per_cpu(switch_index, cpu) = 0;
|
|
|
|
}
|
|
|
|
|
2009-07-09 12:38:49 +00:00
|
|
|
/*
 * Before switching counter sets: read the live hardware counter values
 * into the multiplex slots of the virtual counters they currently back,
 * so counting can resume where it left off when this set returns.
 */
static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *multiplex = msrs->multiplex;
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (counters[i].addr)
			rdmsrl(counters[i].addr, multiplex[virt].saved);
	}
}
|
|
|
|
|
|
|
|
/*
 * After switching counter sets: load the saved values of the newly
 * active virtual counters into the hardware counter MSRs.  Must be
 * called after switch_index has been advanced (see nmi_cpu_switch()).
 */
static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *multiplex = msrs->multiplex;
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (counters[i].addr)
			wrmsrl(counters[i].addr, multiplex[virt].saved);
	}
}
|
|
|
|
|
2009-07-09 12:38:49 +00:00
|
|
|
/*
 * Rotate this CPU to the next set of virtual counters.  The sequence —
 * stop, save current set, advance switch_index, reprogram controls,
 * restore new set, start — must run in exactly this order while the
 * counters are quiescent.
 */
static void nmi_cpu_switch(void *dummy)
{
	int cpu = smp_processor_id();
	int si = per_cpu(switch_index, cpu);
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	nmi_cpu_stop(NULL);
	nmi_cpu_save_mpx_registers(msrs);

	/* move to next set */
	si += model->num_counters;
	/* wrap around when past the last set or the next set is unused */
	if ((si >= model->num_virt_counters) || (counter_config[si].count == 0))
		per_cpu(switch_index, cpu) = 0;
	else
		per_cpu(switch_index, cpu) = si;

	model->switch_ctrl(model, msrs);
	nmi_cpu_restore_mpx_registers(msrs);

	nmi_cpu_start(NULL);
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Quick check to see if multiplexing is necessary.
 * The check should be sufficient since counters are used
 * in order.
 */
static int nmi_multiplex_on(void)
{
	/* If the first counter beyond the hardware set is configured,
	 * more events were requested than physical counters exist. */
	return counter_config[model->num_counters].count ? 0 : -EINVAL;
}
|
|
|
|
|
|
|
|
/*
 * oprofile switch_events hook: rotate all CPUs to their next counter
 * set.  Returns -ENOSYS if the model cannot multiplex and -EINVAL if
 * the configuration does not need it.  Only switches while the
 * counters are actually running.
 */
static int nmi_switch_event(void)
{
	if (!has_mux())
		return -ENOSYS;		/* not implemented */
	if (nmi_multiplex_on() < 0)
		return -EINVAL;		/* not necessary */

	get_online_cpus();
	if (ctr_running)
		on_each_cpu(nmi_cpu_switch, NULL, 1);
	put_online_cpus();

	return 0;
}
|
|
|
|
|
2009-07-09 14:02:44 +00:00
|
|
|
/* Expose the switch_events operation to the oprofile core only when
 * the model actually supports multiplexing. */
static inline void mux_init(struct oprofile_operations *ops)
{
	if (has_mux())
		ops->switch_events = nmi_switch_event;
}
|
|
|
|
|
2009-07-09 19:42:51 +00:00
|
|
|
/* Copy CPU 0's multiplex shadow state to another CPU (all CPUs share
 * the same configuration; see nmi_setup()). */
static void mux_clone(int cpu)
{
	if (!has_mux())
		return;

	memcpy(per_cpu(cpu_msrs, cpu).multiplex,
	       per_cpu(cpu_msrs, 0).multiplex,
	       sizeof(struct op_msr) * model->num_virt_counters);
}
|
|
|
|
|
2009-07-16 11:04:43 +00:00
|
|
|
#else

/* Multiplexing disabled: virtual and physical counters are identical
 * and all mux operations are no-ops (setup trivially succeeds). */
inline int op_x86_phys_to_virt(int phys) { return phys; }
inline int op_x86_virt_to_phys(int virt) { return virt; }
static inline void nmi_shutdown_mux(void) { }
static inline int nmi_setup_mux(void) { return 1; }
static inline void
nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }
static inline void mux_init(struct oprofile_operations *ops) { }
static void mux_clone(int cpu) { }

#endif
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
static void free_msrs(void)
|
|
|
|
{
|
|
|
|
int i;
|
2006-03-28 09:56:39 +00:00
|
|
|
for_each_possible_cpu(i) {
|
2008-03-25 22:06:59 +00:00
|
|
|
kfree(per_cpu(cpu_msrs, i).counters);
|
|
|
|
per_cpu(cpu_msrs, i).counters = NULL;
|
|
|
|
kfree(per_cpu(cpu_msrs, i).controls);
|
|
|
|
per_cpu(cpu_msrs, i).controls = NULL;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2010-03-23 18:09:51 +00:00
|
|
|
nmi_shutdown_mux();
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Allocate zeroed counter/control MSR shadow arrays for every possible
 * CPU, plus the multiplex arrays.  Returns 1 on success, 0 on failure;
 * any partial allocation is undone via free_msrs() before returning.
 */
static int allocate_msrs(void)
{
	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
	size_t counters_size = sizeof(struct op_msr) * model->num_counters;

	int i;
	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).counters)
			goto fail;
		per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).controls)
			goto fail;
	}

	if (!nmi_setup_mux())
		goto fail;

	return 1;

fail:
	free_msrs();
	return 0;
}
|
|
|
|
|
2008-01-30 12:32:33 +00:00
|
|
|
/*
 * Per-CPU setup (runs on each CPU): save the current MSR values,
 * program the counters from counter_config (under oprofilefs_lock,
 * which also guards the config against concurrent fs writes), then
 * point the local APIC performance-counter LVT entry at NMI delivery,
 * remembering the old entry for nmi_cpu_shutdown().
 */
static void nmi_cpu_setup(void *dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
	nmi_cpu_save_registers(msrs);
	raw_spin_lock(&oprofilefs_lock);
	model->setup_ctrs(model, msrs);
	nmi_cpu_setup_mux(cpu, msrs);
	raw_spin_unlock(&oprofilefs_lock);
	per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}
|
|
|
|
|
2009-07-09 16:33:02 +00:00
|
|
|
/*
 * Restore the MSR values saved by nmi_cpu_save_registers().  Controls
 * are written back before counters — the reverse of the save order;
 * presumably so counters are not live while controls are in flux
 * (NOTE(review): ordering intent inferred, confirm against model docs).
 */
static void nmi_cpu_restore_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < model->num_controls; ++i) {
		if (controls[i].addr)
			wrmsrl(controls[i].addr, controls[i].saved);
	}

	for (i = 0; i < model->num_counters; ++i) {
		if (counters[i].addr)
			wrmsrl(counters[i].addr, counters[i].saved);
	}
}
|
|
|
|
|
2008-01-30 12:32:33 +00:00
|
|
|
/*
 * Per-CPU teardown: put the APIC LVTPC entry back to what it was
 * before profiling and restore the saved MSR values.
 */
static void nmi_cpu_shutdown(void *dummy)
{
	unsigned int v;
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	/* restoring APIC_LVTPC can trigger an apic error because the delivery
	 * mode and vector nr combination can be illegal. That's by design: on
	 * power on apic lvt contain a zero vector nr which are legal only for
	 * NMI delivery mode. So inhibit apic err before restoring lvtpc
	 */
	v = apic_read(APIC_LVTERR);
	apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
	apic_write(APIC_LVTERR, v);
	nmi_cpu_restore_registers(msrs);
}
|
|
|
|
|
2010-04-29 12:55:55 +00:00
|
|
|
/*
 * Bring a (hot-plugged or recovered) CPU into the current profiling
 * state: set it up if profiling is enabled, and start its counters if
 * they are running globally.  Setup must precede start.
 */
static void nmi_cpu_up(void *dummy)
{
	if (nmi_enabled)
		nmi_cpu_setup(dummy);
	if (ctr_running)
		nmi_cpu_start(dummy);
}
|
|
|
|
|
|
|
|
/*
 * Take a CPU out of profiling before it goes offline: stop its
 * counters first, then undo the per-CPU setup.  Exact mirror of
 * nmi_cpu_up().
 */
static void nmi_cpu_down(void *dummy)
{
	if (ctr_running)
		nmi_cpu_stop(dummy);
	if (nmi_enabled)
		nmi_cpu_shutdown(dummy);
}
|
|
|
|
|
2013-07-19 11:52:42 +00:00
|
|
|
/*
 * Populate oprofilefs with one numbered directory per available
 * virtual counter, each holding the tunables that feed counter_config.
 * Always returns 0.
 */
static int nmi_create_files(struct dentry *root)
{
	unsigned int i;

	for (i = 0; i < model->num_virt_counters; ++i) {
		struct dentry *dir;
		char buf[4];	/* "0".."999" + NUL; counter counts stay small */

		/* quick little hack to _not_ expose a counter if it is not
		 * available for use. This should protect userspace app.
		 * NOTE: assumes 1:1 mapping here (that counters are organized
		 * sequentially in their struct assignment).
		 */
		if (!avail_to_resrv_perfctr_nmi_bit(op_x86_virt_to_phys(i)))
			continue;

		snprintf(buf, sizeof(buf), "%d", i);
		dir = oprofilefs_mkdir(root, buf);
		oprofilefs_create_ulong(dir, "enabled", &counter_config[i].enabled);
		oprofilefs_create_ulong(dir, "event", &counter_config[i].event);
		oprofilefs_create_ulong(dir, "count", &counter_config[i].count);
		oprofilefs_create_ulong(dir, "unit_mask", &counter_config[i].unit_mask);
		oprofilefs_create_ulong(dir, "kernel", &counter_config[i].kernel);
		oprofilefs_create_ulong(dir, "user", &counter_config[i].user);
		oprofilefs_create_ulong(dir, "extra", &counter_config[i].extra);
	}

	return 0;
}
|
2008-01-30 12:32:33 +00:00
|
|
|
|
2008-09-05 10:17:40 +00:00
|
|
|
/*
 * CPU hotplug callback: fold CPUs coming online (or whose removal
 * failed) into the current profiling state, and quiesce CPUs about to
 * go offline.  The down path waits for completion (last arg 1); the up
 * path does not.
 */
static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
				 void *data)
{
	int cpu = (unsigned long)data;
	switch (action) {
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smp_call_function_single(cpu, nmi_cpu_up, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, nmi_cpu_down, NULL, 1);
		break;
	}
	return NOTIFY_DONE;
}
|
|
|
|
|
|
|
|
/* Hotplug notifier; registered in nmi_setup(). */
static struct notifier_block oprofile_cpu_nb = {
	.notifier_call = oprofile_cpu_notifier
};
|
|
|
|
|
2010-05-03 13:52:26 +00:00
|
|
|
/*
 * Global setup: allocate per-CPU MSR state, fill in the MSR addresses
 * once (on CPU 0) and copy them to all other CPUs, install the NMI
 * handler, then program every online CPU and register the hotplug
 * notifier.  The cpu_notifier_register_begin/done() bracket nests
 * get/put_online_cpus() inside it to avoid an ABBA deadlock with CPU
 * hotplug.  Returns 0 on success or a negative errno; on failure all
 * allocations are released.
 */
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	/* We need to serialize save and setup for HT because the subset
	 * of msrs are distinct for save and setup operations
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		goto fail;

	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		/* Clone CPU 0's address tables to every other CPU. */
		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}

	nmi_enabled = 0;
	ctr_running = 0;
	/* make variables visible to the nmi handler: */
	smp_mb();
	err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
					0, "oprofile");
	if (err)
		goto fail;

	cpu_notifier_register_begin();

	/* Use get/put_online_cpus() to protect 'nmi_enabled' */
	get_online_cpus();
	nmi_enabled = 1;
	/* make nmi_enabled visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	__register_cpu_notifier(&oprofile_cpu_nb);
	put_online_cpus();

	cpu_notifier_register_done();

	return 0;
fail:
	free_msrs();
	return err;
}
|
|
|
|
|
|
|
|
/*
 * Tear down NMI profiling: stop the counters on all CPUs, drop the CPU
 * hotplug notifier and the NMI handler, and release the per-CPU MSR
 * state. Inverse of nmi_setup().
 */
static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	/*
	 * Nest get/put_online_cpus() *inside* cpu_notifier_register_begin/
	 * done() so notifier unregistration cannot ABBA-deadlock against a
	 * concurrent CPU hotplug operation.
	 */
	cpu_notifier_register_begin();

	/* Use get/put_online_cpus() to protect 'nmi_enabled' & 'ctr_running' */
	get_online_cpus();
	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
	nmi_enabled = 0;
	ctr_running = 0;
	__unregister_cpu_notifier(&oprofile_cpu_nb);
	put_online_cpus();
	cpu_notifier_register_done();

	/* make variables visible to the nmi handler: */
	smp_mb();
	unregister_nmi_handler(NMI_LOCAL, "oprofile");

	/* Let the model release its MSR bookkeeping for this CPU. */
	msrs = &get_cpu_var(cpu_msrs);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}
|
|
|
|
|
2008-09-05 10:17:40 +00:00
|
|
|
#ifdef CONFIG_PM
|
|
|
|
|
2011-03-23 21:15:54 +00:00
|
|
|
static int nmi_suspend(void)
|
2008-09-05 10:17:40 +00:00
|
|
|
{
|
|
|
|
/* Only one CPU left, just stop that one */
|
|
|
|
if (nmi_enabled == 1)
|
|
|
|
nmi_cpu_stop(NULL);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-03-23 21:15:54 +00:00
|
|
|
static void nmi_resume(void)
|
2008-09-05 10:17:40 +00:00
|
|
|
{
|
|
|
|
if (nmi_enabled == 1)
|
|
|
|
nmi_cpu_start(NULL);
|
|
|
|
}
|
|
|
|
|
2011-03-23 21:15:54 +00:00
|
|
|
/* PM hooks: pause/restart the NMI counters across system suspend/resume. */
static struct syscore_ops oprofile_syscore_ops = {
	.resume = nmi_resume,
	.suspend = nmi_suspend,
};
|
|
|
|
|
2011-03-23 21:15:54 +00:00
|
|
|
/* Register the syscore suspend/resume hooks (called from op_nmi_init()). */
static void __init init_suspend_resume(void)
{
	register_syscore_ops(&oprofile_syscore_ops);
}
|
|
|
|
|
2011-03-23 21:15:54 +00:00
|
|
|
/* Unregister the syscore hooks; counterpart of init_suspend_resume(). */
static void exit_suspend_resume(void)
{
	unregister_syscore_ops(&oprofile_syscore_ops);
}
|
|
|
|
|
|
|
|
#else

/* Without CONFIG_PM the suspend/resume hooks compile away to no-ops. */
static inline void init_suspend_resume(void) { }
static inline void exit_suspend_resume(void) { }

#endif /* CONFIG_PM */
|
|
|
|
|
2008-01-30 12:32:33 +00:00
|
|
|
/*
 * Detect a Pentium 4 class CPU and select the matching P4 model spec.
 * Returns 1 and sets *cpu_type on success; returns 0 when the model is
 * unsupported, letting the caller fall back to other handling.
 */
static int __init p4_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	/* Models 5 and anything above 6 are not handled by this driver. */
	if (cpu_model > 6 || cpu_model == 5)
		return 0;

#ifndef CONFIG_SMP
	*cpu_type = "i386/p4";
	model = &op_p4_spec;
	return 1;
#else
	/* Pick the HyperThreading-aware variant from the sibling count. */
	switch (smp_num_siblings) {
	case 1:
		*cpu_type = "i386/p4";
		model = &op_p4_spec;
		return 1;

	case 2:
		*cpu_type = "i386/p4-ht";
		model = &op_p4_ht2_spec;
		return 1;
	}
#endif

	/* More than two siblings is unsupported; caller reverts to timer. */
	printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
	printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
	return 0;
}
|
|
|
|
|
2011-10-11 17:39:16 +00:00
|
|
|
/* Values for the "cpu_type" module parameter (see set_cpu_type()). */
enum __force_cpu_type {
	reserved = 0,		/* do not force */
	timer,
	arch_perfmon,
};

/* Selected override; defaults to 'reserved' (no forcing). */
static int force_cpu_type;
|
|
|
|
|
|
|
|
static int set_cpu_type(const char *str, struct kernel_param *kp)
|
2009-05-06 10:10:23 +00:00
|
|
|
{
|
2011-10-11 17:39:16 +00:00
|
|
|
if (!strcmp(str, "timer")) {
|
|
|
|
force_cpu_type = timer;
|
|
|
|
printk(KERN_INFO "oprofile: forcing NMI timer mode\n");
|
|
|
|
} else if (!strcmp(str, "arch_perfmon")) {
|
|
|
|
force_cpu_type = arch_perfmon;
|
2009-05-06 10:10:23 +00:00
|
|
|
printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
|
2011-10-11 17:39:16 +00:00
|
|
|
} else {
|
|
|
|
force_cpu_type = 0;
|
2009-05-06 10:10:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2011-10-11 17:39:16 +00:00
|
|
|
module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0);
|
2009-04-27 15:44:11 +00:00
|
|
|
|
2008-01-30 12:32:33 +00:00
|
|
|
/*
 * Detect a P6-family (PPro and later) Intel CPU and select the matching
 * model spec. Returns 1 and sets *cpu_type on success, 0 otherwise
 * (including when the user forced arch_perfmon and the CPU supports it).
 */
static int __init ppro_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;
	struct op_x86_model_spec *spec = &op_ppro_spec;	/* default */

	/* User asked for arch_perfmon; let the caller's fallback handle it. */
	if (force_cpu_type == arch_perfmon && cpu_has_arch_perfmon)
		return 0;

	/*
	 * Documentation on identifying Intel processors by CPU family
	 * and model can be found in the Intel Software Developer's
	 * Manuals (SDM):
	 *
	 *  http://www.intel.com/products/processor/manuals/
	 *
	 * As of May 2010 the documentation for this was in the:
	 *   "Intel 64 and IA-32 Architectures Software Developer's
	 *   Manual Volume 3B: System Programming Guide", "Table B-1
	 *   CPUID Signature Values of DisplayFamily_DisplayModel".
	 */
	switch (cpu_model) {
	case 0 ... 2:
		*cpu_type = "i386/ppro";
		break;
	case 3 ... 5:
		*cpu_type = "i386/pii";
		break;
	case 6 ... 8:
	case 10 ... 11:
		*cpu_type = "i386/piii";
		break;
	case 9:
	case 13:
		*cpu_type = "i386/p6_mobile";
		break;
	case 14:
		*cpu_type = "i386/core";
		break;
	case 0x0f:
	case 0x16:
	case 0x17:
	case 0x1d:
		*cpu_type = "i386/core_2";
		break;
	case 0x1a:
	case 0x1e:
	case 0x2e:
		/* Nehalem-class: use the architectural perfmon model. */
		spec = &op_arch_perfmon_spec;
		*cpu_type = "i386/core_i7";
		break;
	case 0x1c:
		*cpu_type = "i386/atom";
		break;
	default:
		/* Unknown */
		return 0;
	}

	model = spec;
	return 1;
}
|
|
|
|
|
2005-09-06 22:17:26 +00:00
|
|
|
/*
 * Entry point for NMI-based profiling: identify the CPU, pick the
 * matching model spec and cpu_type string, fill in the oprofile
 * operations, and register PM hooks. Returns 0 on success or a
 * negative errno (-ENODEV for unsupported/forced-off hardware).
 */
int __init op_nmi_init(struct oprofile_operations *ops)
{
	__u8 vendor = boot_cpu_data.x86_vendor;
	__u8 family = boot_cpu_data.x86;
	char *cpu_type = NULL;
	int ret = 0;

	if (!cpu_has_apic)
		return -ENODEV;

	/* User forced timer mode via the "cpu_type" module parameter. */
	if (force_cpu_type == timer)
		return -ENODEV;

	switch (vendor) {
	case X86_VENDOR_AMD:
		/* Needs to be at least an Athlon (or hammer in 32bit mode) */

		switch (family) {
		case 6:
			cpu_type = "i386/athlon";
			break;
		case 0xf:
			/*
			 * Actually it could be i386/hammer too, but
			 * give user space a consistent name.
			 */
			cpu_type = "x86-64/hammer";
			break;
		case 0x10:
			cpu_type = "x86-64/family10";
			break;
		case 0x11:
			cpu_type = "x86-64/family11h";
			break;
		case 0x12:
			cpu_type = "x86-64/family12h";
			break;
		case 0x14:
			cpu_type = "x86-64/family14h";
			break;
		case 0x15:
			cpu_type = "x86-64/family15h";
			break;
		default:
			return -ENODEV;
		}
		model = &op_amd_spec;
		break;

	case X86_VENDOR_INTEL:
		switch (family) {
		/* Pentium IV */
		case 0xf:
			p4_init(&cpu_type);
			break;

		/* A P6-class processor */
		case 6:
			ppro_init(&cpu_type);
			break;

		default:
			break;
		}

		if (cpu_type)
			break;

		if (!cpu_has_arch_perfmon)
			return -ENODEV;

		/* use arch perfmon as fallback */
		cpu_type = "i386/arch_perfmon";
		model = &op_arch_perfmon_spec;
		break;

	default:
		return -ENODEV;
	}

	/* default values, can be overwritten by model */
	ops->create_files = nmi_create_files;
	ops->setup = nmi_setup;
	ops->shutdown = nmi_shutdown;
	ops->start = nmi_start;
	ops->stop = nmi_stop;
	ops->cpu_type = cpu_type;

	/* Give the model a chance to override the defaults above. */
	if (model->init)
		ret = model->init(ops);
	if (ret)
		return ret;

	if (!model->num_virt_counters)
		model->num_virt_counters = model->num_counters;

	mux_init(ops);

	init_suspend_resume();

	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
	return 0;
}
|
|
|
|
|
2005-09-06 22:17:26 +00:00
|
|
|
/* Module teardown: drop the PM hooks registered by op_nmi_init(). */
void op_nmi_exit(void)
{
	exit_suspend_resume();
}
|