Merge branch 'x86-tsx-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 tsx fixes from Thomas Gleixner:
 "This update provides kernel-side handling for the TSX erratum of Intel
  Skylake (and later) CPUs.

  On these CPUs, Intel Transactional Synchronization Extensions (TSX)
  functions can result in unpredictable system behavior under certain
  circumstances. The issue is mitigated with a microcode update which
  utilizes Performance Monitoring Counter (PMC) 3 when TSX functions are
  in use. This mitigation is enabled unconditionally by the updated
  microcode.

  As a consequence, the use of TSX functions can cause corrupted
  performance monitoring results for events which utilize PMC3. The
  corruption is silent on kernels which have no update for this issue.

  This update makes the kernel aware of the PMC3 utilization by the
  microcode: the microcode offers the possibility to enforce a TSX abort,
  which prevents the malfunction and frees up PMC3. The enforced TSX
  abort requires the TSX-using application to have a software fallback
  path implemented; abort handlers which solely retry the transaction
  will fail over and over.

  The enforced TSX abort request is issued by the kernel when:

   - enforced TSX abort is enabled (PMU attribute)

   - a performance monitoring request needs PMC3

  When PMC3 is no longer used by the kernel, the TSX force abort request
  is cleared.

  The enforced TSX abort mechanism is enabled by default and can be
  controlled by the administrator via the new PMU attribute
  'allow_tsx_force_abort'. This attribute is only visible when updated
  microcode is detected on affected systems. Writing '0' disables the
  enforced TSX abort mechanism, '1' enables it.

  As a result of disabling the enforced TSX abort mechanism, PMC3 is
  permanently unavailable for performance monitoring, which can cause
  performance monitoring requests to fail or switch to multiplexing
  mode."

* branch 'x86-tsx-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/intel: Implement support for TSX Force Abort
  x86: Add TSX Force Abort CPUID/MSR
  perf/x86/intel: Generalize dynamic constraint creation
  perf/x86/intel: Make cpuc allocations consistent
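The "software fallback path" requirement above can be illustrated with a small user-space sketch. This is not part of the patch set: the names (fallback_lock, shared_counter, increment_counter) are illustrative, and it assumes GCC or Clang with -mrtm on TSX-capable hardware. The point is that the abort handler falls back to an ordinary lock instead of blindly retrying, so the program still makes progress once the kernel asserts MSR_TSX_FORCE_ABORT and every transaction aborts.

/*
 * Sketch of an RTM critical section with a non-transactional fallback.
 * Illustrative user-space code, not kernel code from this merge.
 */
#include <immintrin.h>
#include <stdatomic.h>

static atomic_int fallback_lock;	/* 0 = free, 1 = held */
static long shared_counter;

static void locked_increment(void)
{
	int expected = 0;

	/* plain test-and-set spinlock used as the fallback path */
	while (!atomic_compare_exchange_weak_explicit(&fallback_lock, &expected, 1,
						      memory_order_acquire,
						      memory_order_relaxed))
		expected = 0;
	shared_counter++;
	atomic_store_explicit(&fallback_lock, 0, memory_order_release);
}

static void increment_counter(void)
{
	unsigned int status = _xbegin();

	if (status == _XBEGIN_STARTED) {
		/*
		 * Reading the lock adds it to the transaction's read set:
		 * if a fallback holder owns it, abort instead of racing.
		 */
		if (atomic_load_explicit(&fallback_lock, memory_order_relaxed))
			_xabort(0xff);
		shared_counter++;
		_xend();
		return;
	}

	/* Abort handler: do the work under the real lock, do not just retry. */
	locked_increment();
}

Only the abort path ever takes the lock; the transactional path merely observes it, so a concurrent lock holder forces the transaction to abort rather than race.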
commit 004cc08675
@@ -1995,7 +1995,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
 */
static void free_fake_cpuc(struct cpu_hw_events *cpuc)
{
	kfree(cpuc->shared_regs);
	intel_cpuc_finish(cpuc);
	kfree(cpuc);
}

@@ -2007,14 +2007,11 @@ static struct cpu_hw_events *allocate_fake_cpuc(void)
	cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
	if (!cpuc)
		return ERR_PTR(-ENOMEM);

	/* only needed, if we have extra_regs */
	if (x86_pmu.extra_regs) {
		cpuc->shared_regs = allocate_shared_regs(cpu);
		if (!cpuc->shared_regs)
			goto error;
	}
	cpuc->is_fake = 1;

	if (intel_cpuc_prepare(cpuc, cpu))
		goto error;

	return cpuc;
error:
	free_fake_cpuc(cpuc);
@@ -2000,6 +2000,39 @@ static void intel_pmu_nhm_enable_all(int added)
	intel_pmu_enable_all(added);
}

static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
{
	u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;

	if (cpuc->tfa_shadow != val) {
		cpuc->tfa_shadow = val;
		wrmsrl(MSR_TSX_FORCE_ABORT, val);
	}
}

static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
{
	/*
	 * We're going to use PMC3, make sure TFA is set before we touch it.
	 */
	if (cntr == 3 && !cpuc->is_fake)
		intel_set_tfa(cpuc, true);
}

static void intel_tfa_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	/*
	 * If we find PMC3 is no longer used when we enable the PMU, we can
	 * clear TFA.
	 */
	if (!test_bit(3, cpuc->active_mask))
		intel_set_tfa(cpuc, false);

	intel_pmu_enable_all(added);
}

static void enable_counter_freeze(void)
{
	update_debugctlmsr(get_debugctlmsr() |
@@ -2769,6 +2802,35 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc)
	raw_spin_unlock(&excl_cntrs->lock);
}

static struct event_constraint *
dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
{
	WARN_ON_ONCE(!cpuc->constraint_list);

	if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
		struct event_constraint *cx;

		/*
		 * grab pre-allocated constraint entry
		 */
		cx = &cpuc->constraint_list[idx];

		/*
		 * initialize dynamic constraint
		 * with static constraint
		 */
		*cx = *c;

		/*
		 * mark constraint as dynamic
		 */
		cx->flags |= PERF_X86_EVENT_DYNAMIC;
		c = cx;
	}

	return c;
}

static struct event_constraint *
intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
			   int idx, struct event_constraint *c)
@@ -2799,27 +2861,7 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
	 * only needed when constraint has not yet
	 * been cloned (marked dynamic)
	 */
	if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
		struct event_constraint *cx;

		/*
		 * grab pre-allocated constraint entry
		 */
		cx = &cpuc->constraint_list[idx];

		/*
		 * initialize dynamic constraint
		 * with static constraint
		 */
		*cx = *c;

		/*
		 * mark constraint as dynamic, so we
		 * can free it later on
		 */
		cx->flags |= PERF_X86_EVENT_DYNAMIC;
		c = cx;
	}
	c = dyn_constraint(cpuc, c, idx);

	/*
	 * From here on, the constraint is dynamic.
@@ -3357,6 +3399,26 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
	return c;
}

static bool allow_tsx_force_abort = true;

static struct event_constraint *
tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event)
{
	struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);

	/*
	 * Without TFA we must not use PMC3.
	 */
	if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
		c = dyn_constraint(cpuc, c, idx);
		c->idxmsk64 &= ~(1ULL << 3);
		c->weight--;
	}

	return c;
}

/*
 * Broadwell:
 *
@@ -3410,7 +3472,7 @@ ssize_t intel_event_sysfs_show(char *page, u64 config)
	return x86_event_sysfs_show(page, config, event);
}

struct intel_shared_regs *allocate_shared_regs(int cpu)
static struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	struct intel_shared_regs *regs;
	int i;
@@ -3442,23 +3504,24 @@ static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
	return c;
}

static int intel_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
{
	if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
		cpuc->shared_regs = allocate_shared_regs(cpu);
		if (!cpuc->shared_regs)
			goto err;
	}

	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
	if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
		size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);

		cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
		cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
		if (!cpuc->constraint_list)
			goto err_shared_regs;
	}

	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
		cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
		if (!cpuc->excl_cntrs)
			goto err_constraint_list;
@@ -3480,6 +3543,11 @@ err:
	return -ENOMEM;
}

static int intel_pmu_cpu_prepare(int cpu)
{
	return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
}

static void flip_smm_bit(void *data)
{
	unsigned long set = *(unsigned long *)data;
@@ -3554,9 +3622,8 @@ static void intel_pmu_cpu_starting(int cpu)
	}
}

static void free_excl_cntrs(int cpu)
static void free_excl_cntrs(struct cpu_hw_events *cpuc)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct intel_excl_cntrs *c;

	c = cpuc->excl_cntrs;
@@ -3564,9 +3631,10 @@ static void free_excl_cntrs(int cpu)
		if (c->core_id == -1 || --c->refcnt == 0)
			kfree(c);
		cpuc->excl_cntrs = NULL;
		kfree(cpuc->constraint_list);
		cpuc->constraint_list = NULL;
	}

	kfree(cpuc->constraint_list);
	cpuc->constraint_list = NULL;
}

static void intel_pmu_cpu_dying(int cpu)
@@ -3577,9 +3645,8 @@ static void intel_pmu_cpu_dying(int cpu)
		disable_counter_freeze();
}

static void intel_pmu_cpu_dead(int cpu)
void intel_cpuc_finish(struct cpu_hw_events *cpuc)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct intel_shared_regs *pc;

	pc = cpuc->shared_regs;
@@ -3589,7 +3656,12 @@ static void intel_pmu_cpu_dead(int cpu)
		cpuc->shared_regs = NULL;
	}

	free_excl_cntrs(cpu);
	free_excl_cntrs(cpuc);
}

static void intel_pmu_cpu_dead(int cpu)
{
	intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
}

static void intel_pmu_sched_task(struct perf_event_context *ctx,
@@ -4107,8 +4179,11 @@ static struct attribute *intel_pmu_caps_attrs[] = {
	NULL
};

DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);

static struct attribute *intel_pmu_attrs[] = {
	&dev_attr_freeze_on_smi.attr,
	NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */
	NULL,
};
@@ -4607,6 +4682,15 @@ __init int intel_pmu_init(void)
		tsx_attr = hsw_tsx_events_attrs;
		intel_pmu_pebs_data_source_skl(
			boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);

		if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
			x86_pmu.flags |= PMU_FL_TFA;
			x86_pmu.get_event_constraints = tfa_get_event_constraints;
			x86_pmu.enable_all = intel_tfa_pmu_enable_all;
			x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
			intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr;
		}

		pr_cont("Skylake events, ");
		name = "skylake";
		break;
@@ -4758,7 +4842,7 @@ static __init int fixup_ht_bug(void)
	hardlockup_detector_perf_restart();

	for_each_online_cpu(c)
		free_excl_cntrs(c);
		free_excl_cntrs(&per_cpu(cpu_hw_events, c));

	cpus_read_unlock();
	pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
@@ -242,6 +242,11 @@ struct cpu_hw_events {
	struct intel_excl_cntrs *excl_cntrs;
	int excl_thread_id; /* 0 or 1 */

	/*
	 * SKL TSX_FORCE_ABORT shadow
	 */
	u64 tfa_shadow;

	/*
	 * AMD specific bits
	 */
@@ -682,6 +687,7 @@ do { \
#define PMU_FL_EXCL_CNTRS 0x4 /* has exclusive counter requirements */
#define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */
#define PMU_FL_PEBS_ALL 0x10 /* all events are valid PEBS events */
#define PMU_FL_TFA 0x20 /* deal with TSX force abort */

#define EVENT_VAR(_id) event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr
@@ -890,7 +896,8 @@ struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event);

struct intel_shared_regs *allocate_shared_regs(int cpu);
extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);

int intel_pmu_init(void);
@@ -1026,9 +1033,13 @@ static inline int intel_pmu_init(void)
	return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
static inline int intel_cpuc_prepare(struct cpu_hw_event *cpuc, int cpu)
{
	return 0;
}

static inline void intel_cpuc_finish(struct cpu_hw_event *cpuc)
{
	return NULL;
}

static inline int is_ht_workaround_enabled(void)
@@ -344,6 +344,7 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
@@ -666,6 +666,12 @@

#define MSR_IA32_TSC_DEADLINE 0x000006E0


#define MSR_TSX_FORCE_ABORT 0x0000010F

#define MSR_TFA_RTM_FORCE_ABORT_BIT 0
#define MSR_TFA_RTM_FORCE_ABORT BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT)

/* P4/Xeon+ specific */
#define MSR_IA32_MCG_EAX 0x00000180
#define MSR_IA32_MCG_EBX 0x00000181