1da53e0230
This patch improves event scheduling by maximizing the use of PMU registers regardless of the order in which events are created in a group.

The algorithm takes into account the list of counter constraints for each event. It assigns events to counters from the most constrained, i.e., works on only one counter, to the least constrained, i.e., works on any counter. Intel Fixed counter events and the BTS special event are also handled via this algorithm which is designed to be fairly generic.

The patch also updates the validation of an event to use the scheduling algorithm. This will cause early failure in perf_event_open().

The 2nd version of this patch follows the model used by PPC, by running the scheduling algorithm and the actual assignment separately. Actual assignment takes place in hw_perf_enable() whereas scheduling is implemented in hw_perf_group_sched_in() and x86_pmu_enable().

Signed-off-by: Stephane Eranian <eranian@google.com>
[ fixup whitespace and style nits as well as adding is_x86_event() ]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <4b5430c6.0f975e0a.1bf9.ffff85fe@mx.google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
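The constraint-driven idea described above can be made concrete with a short sketch. The struct, names, and helper below are hypothetical and are not taken from the patch; they only illustrate most-constrained-first assignment, under the assumption that each event carries a bitmask of the counters it may use. The actual patch additionally separates the scheduling pass from the assignment pass and folds the fixed counters and BTS into the same constraint scheme.

#include <limits.h>

#define SKETCH_NUM_COUNTERS 8			/* e.g. X86_PMC_MAX_GENERIC */

struct sketch_event {
	unsigned int allowed;			/* bitmask of counters this event may use */
	int counter;				/* assigned counter, caller initializes to -1 */
};

/* Returns 0 when every event got a counter, -1 when the group cannot be scheduled. */
static int sketch_schedule(struct sketch_event *ev, int nr_events)
{
	unsigned int used = 0;			/* counters already handed out */
	int round;

	for (round = 0; round < nr_events; round++) {
		int i, best = -1, best_weight = INT_MAX;

		/* pick the unassigned event that works on the fewest counters */
		for (i = 0; i < nr_events; i++) {
			int weight = __builtin_popcount(ev[i].allowed);	/* GCC builtin */

			if (ev[i].counter < 0 && weight < best_weight) {
				best = i;
				best_weight = weight;
			}
		}

		/* give it the first counter it allows that is still free */
		for (i = 0; i < SKETCH_NUM_COUNTERS; i++) {
			if ((ev[best].allowed & (1u << i)) && !(used & (1u << i))) {
				ev[best].counter = i;
				used |= 1u << i;
				break;
			}
		}
		if (ev[best].counter < 0)
			return -1;	/* over-committed: fail early, as perf_event_open() now can */
	}
	return 0;
}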
132 lines
3.4 KiB
C
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define X86_PMC_MAX_GENERIC 8
#define X86_PMC_MAX_FIXED 3

#define X86_PMC_IDX_GENERIC 0
#define X86_PMC_IDX_FIXED 32
#define X86_PMC_IDX_MAX 64

#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
#define MSR_ARCH_PERFMON_PERFCTR1 0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
#define MSR_ARCH_PERFMON_EVENTSEL1 0x187

#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22)
#define ARCH_PERFMON_EVENTSEL_INT (1 << 20)
#define ARCH_PERFMON_EVENTSEL_OS (1 << 17)
#define ARCH_PERFMON_EVENTSEL_USR (1 << 16)

/*
 * Includes eventsel and unit mask as well:
 */

#define INTEL_ARCH_EVTSEL_MASK 0x000000FFULL
#define INTEL_ARCH_UNIT_MASK 0x0000FF00ULL
#define INTEL_ARCH_EDGE_MASK 0x00040000ULL
#define INTEL_ARCH_INV_MASK 0x00800000ULL
#define INTEL_ARCH_CNT_MASK 0xFF000000ULL
#define INTEL_ARCH_EVENT_MASK (INTEL_ARCH_UNIT_MASK|INTEL_ARCH_EVTSEL_MASK)

/*
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define INTEL_ARCH_FIXED_MASK \
	(INTEL_ARCH_CNT_MASK| \
	 INTEL_ARCH_INV_MASK| \
	 INTEL_ARCH_EDGE_MASK|\
	 INTEL_ARCH_UNIT_MASK|\
	 INTEL_ARCH_EVENT_MASK)

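/*
 * Illustrative sketch only (not part of this header): one way the filter
 * mask above can be used when deciding whether an event may run on a fixed
 * counter. The structure and helper below are hypothetical; the real
 * constraint matching lives in the x86 perf_event code, but the idea is the
 * same: after masking with INTEL_ARCH_FIXED_MASK, the config must reduce to
 * exactly the eventsel+umask of the fixed event, i.e. inv, edge and
 * cnt-mask must all be clear.
 */
struct sketch_fixed_event {
	unsigned long long code;	/* eventsel+umask of the fixed event */
	int idx;			/* X86_PMC_IDX_FIXED_* slot */
};

static inline int sketch_fits_fixed_counter(unsigned long long config,
					    const struct sketch_fixed_event *fe)
{
	return (config & INTEL_ARCH_FIXED_MASK) == fe->code;
}
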
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
	(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6

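/*
 * Illustrative sketch only (not part of this header): programming generic
 * counter 0 to count the architectural UNHALTED_CORE_CYCLES event in both
 * kernel and user mode. Assumes the kernel's wrmsrl() helper from
 * <asm/msr.h>; on version 2+ PMUs the counter must additionally be enabled
 * in the global control MSR.
 */
static inline void sketch_count_core_cycles(void)
{
	unsigned long long config;

	config  = ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL;
	config |= ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK;
	config |= ARCH_PERFMON_EVENTSEL_OS;		/* count in ring 0 */
	config |= ARCH_PERFMON_EVENTSEL_USR;		/* count in user mode */
	config |= ARCH_PERFMON_EVENTSEL0_ENABLE;	/* enable the counter */

	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0, config);
}
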
/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_events:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_events_fixed:4;
		unsigned int reserved:28;
	} split;
	unsigned int full;
};

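/*
 * Illustrative sketch only (not part of this header): decoding CPUID leaf
 * 0xA with the unions above, as is done when the PMU is enumerated at boot.
 * Assumes the kernel's cpuid() helper from <asm/processor.h>.
 */
static inline void sketch_enumerate_pmu(void)
{
	union cpuid10_eax eax;
	union cpuid10_edx edx;
	unsigned int ebx, ecx;

	cpuid(0x0000000a, &eax.full, &ebx, &ecx, &edx.full);

	/*
	 * eax.split.version_id       - architectural perfmon version
	 * eax.split.num_events       - generic counters per logical CPU
	 * eax.split.bit_width        - width of the generic counters
	 * edx.split.num_events_fixed - number of fixed-function counters
	 */
}
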
/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309
#define X86_PMC_IDX_FIXED_INSTRUCTIONS (X86_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a
#define X86_PMC_IDX_FIXED_CPU_CYCLES (X86_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b
#define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2)

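/*
 * Illustrative sketch only (not part of this header): enabling fixed
 * counter 0 (Instr_Retired.Any) for both kernel and user mode. Assumes the
 * SDM layout of MSR_ARCH_PERFMON_FIXED_CTR_CTRL (a 4-bit control field per
 * fixed counter: bit 0 = enable in ring 0, bit 1 = enable in rings > 0,
 * bit 3 = PMI on overflow) and the kernel's rdmsrl()/wrmsrl() helpers.
 */
static inline void sketch_enable_fixed_instructions(void)
{
	unsigned long long ctrl;
	int idx = X86_PMC_IDX_FIXED_INSTRUCTIONS - X86_PMC_IDX_FIXED;

	rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
	ctrl |= 0x3ULL << (idx * 4);	/* OS + USR for fixed counter 0 */
	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
}
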
/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16)

#ifdef CONFIG_PERF_EVENTS
extern void init_hw_perf_events(void);
extern void perf_events_lapic_init(void);

#define PERF_EVENT_INDEX_OFFSET 0

#else
static inline void init_hw_perf_events(void) { }
static inline void perf_events_lapic_init(void) { }
#endif

#endif /* _ASM_X86_PERF_EVENT_H */