commit 3a4ac121c2
Zhaoxin CPUs provide facilities for monitoring performance via a PMU (Performance Monitor Unit), but the functionality is unused so far. Therefore, add support for the Zhaoxin PMU to make performance-related hardware events available. The PMU is mostly an Intel Architectural PerfMon-v2 with a novel erratum for the ZXC line. It supports the following events:

-----------------------------------------------------------------------------------------------------------------------------------
Event                      | Event  | Umask | Description
                           | Select |       |
-----------------------------------------------------------------------------------------------------------------------------------
cpu-cycles                 |  82h   |  00h  | unhalt core clock
instructions               |  00h   |  00h  | number of instructions at retirement.
cache-references           |  15h   |  05h  | number of fillq pushs at the current cycle.
cache-misses               |  1ah   |  05h  | number of l2 miss pushed by fillq.
branch-instructions        |  28h   |  00h  | counts the number of branch instructions retired.
branch-misses              |  29h   |  00h  | mispredicted branch instructions at retirement.
bus-cycles                 |  83h   |  00h  | unhalt bus clock
stalled-cycles-frontend    |  01h   |  01h  | Increments each cycle the # of Uops issued by the RAT to RS.
stalled-cycles-backend     |  0fh   |  04h  | RS0/1/2/3/45 empty
L1-dcache-loads            |  68h   |  05h  | number of retire/commit load.
L1-dcache-load-misses      |  4bh   |  05h  | retired load uops whose data source followed an L1 miss.
L1-dcache-stores           |  69h   |  06h  | number of retire/commit Store, no LEA
L1-dcache-store-misses     |  62h   |  05h  | cache lines in M state evicted out of L1D due to Snoop HitM or dirty line replacement.
L1-icache-loads            |  00h   |  03h  | number of l1i cache access for valid normal fetch, including un-cacheable access.
L1-icache-load-misses      |  01h   |  03h  | number of l1i cache miss for valid normal fetch, including un-cacheable miss.
L1-icache-prefetches       |  0ah   |  03h  | number of prefetch.
L1-icache-prefetch-misses  |  0bh   |  03h  | number of prefetch miss.
dTLB-loads                 |  68h   |  05h  | number of retire/commit load
dTLB-load-misses           |  2ch   |  05h  | number of load operations that miss all level tlbs and cause a tablewalk.
dTLB-stores                |  69h   |  06h  | number of retire/commit Store, no LEA
dTLB-store-misses          |  30h   |  05h  | number of store operations that miss all level tlbs and cause a tablewalk.
dTLB-prefetches            |  64h   |  05h  | number of hardware pte prefetch requests dispatched out of the prefetch FIFO.
dTLB-prefetch-misses       |  65h   |  05h  | number of hardware pte prefetch requests that miss the l1d data cache.
iTLB-loads                 |  00h   |  00h  | actually counts instructions.
iTLB-load-misses           |  34h   |  05h  | number of code operations that miss all level tlbs and cause a tablewalk.
-----------------------------------------------------------------------------------------------------------------------------------

Reported-by: kbuild test robot <lkp@intel.com>
Signed-off-by: CodyYao-oc <CodyYao-oc@zhaoxin.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/1586747669-4827-1-git-send-email-CodyYao-oc@zhaoxin.com
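For reference, any event in the table can be requested from user space as a
raw perf event. The sketch below assumes the usual architectural encoding,
attr.config = (umask << 8) | event_select, an assumption carried over from
the Intel Architectural PerfMon layout that this PMU largely follows; the
helper name is illustrative:

  #include <linux/perf_event.h>
  #include <sys/syscall.h>
  #include <string.h>
  #include <unistd.h>

  /* cache-references from the table above: event select 15h, umask 05h */
  static int open_cache_references(void)
  {
          struct perf_event_attr attr;

          memset(&attr, 0, sizeof(attr));
          attr.size = sizeof(attr);
          attr.type = PERF_TYPE_RAW;
          attr.config = (0x05 << 8) | 0x15; /* (umask << 8) | event select */

          /* count for the calling thread, on any CPU */
          return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
  }

The same event can be requested from the perf tool as
"perf stat -e cpu/event=0x15,umask=0x05/".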
// SPDX-License-Identifier: GPL-2.0
/*
 * local APIC based NMI watchdog for various CPUs.
 *
 * This file also handles reservation of performance counters for coordination
 * with other users (like oprofile).
 *
 * Note that these events normally don't tick when the CPU idles. This means
 * the frequency varies with CPU load.
 *
 * Original code for K7/P6 written by Keith Owens
 *
 */

#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <asm/nmi.h>
#include <linux/kprobes.h>

#include <asm/apic.h>
#include <asm/perf_event.h>

/*
 * this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0.
 *
 * It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66
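
/*
 * Worked arithmetic behind the 66 (MSR values as in msr-index.h, quoted
 * here for illustration): MSR_P4_BSU_ESCR0 is 0x3a0 and MSR_P4_CRU_ESCR5
 * is 0x3e1, so the largest bit offset handed out below is
 * 0x3e1 - 0x3a0 = 0x41 = 65, which needs a 66-bit map.
 */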

/*
 * perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evntsel_nmi_owner tracks the ownership of the event selection
 * registers - different performance counters/event selection registers
 * may be reserved by different subsystems; this reservation system
 * just tries to coordinate things a little.
 */
static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the performance counter register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
		if (msr >= MSR_F15H_PERF_CTR)
			return (msr - MSR_F15H_PERF_CTR) >> 1;
		return msr - MSR_K7_PERFCTR0;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return msr - MSR_ARCH_PERFMON_PERFCTR0;

		switch (boot_cpu_data.x86) {
		case 6:
			return msr - MSR_P6_PERFCTR0;
		case 11:
			return msr - MSR_KNC_PERFCTR0;
		case 15:
			return msr - MSR_P4_BPU_PERFCTR0;
		}
		fallthrough;
	case X86_VENDOR_ZHAOXIN:
	case X86_VENDOR_CENTAUR:
		return msr - MSR_ARCH_PERFMON_PERFCTR0;
	}
	return 0;
}
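
/*
 * Example mappings (MSR values as in msr-index.h, quoted here for
 * illustration): with arch perfmon, MSR_ARCH_PERFMON_PERFCTR0 (0xc1)
 * maps to bit 0 and 0xc2 to bit 1; on AMD Family 15h the CTL/CTR MSRs
 * are interleaved (0xc0010200, 0xc0010201, ...), which is why the pair
 * index is recovered with the >> 1 above.
 */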

/*
 * converts an msr to an appropriate reservation bit
 * returns the bit offset of the event selection register
 */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the event selection register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
		if (msr >= MSR_F15H_PERF_CTL)
			return (msr - MSR_F15H_PERF_CTL) >> 1;
		return msr - MSR_K7_EVNTSEL0;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return msr - MSR_ARCH_PERFMON_EVENTSEL0;

		switch (boot_cpu_data.x86) {
		case 6:
			return msr - MSR_P6_EVNTSEL0;
		case 11:
			return msr - MSR_KNC_EVNTSEL0;
		case 15:
			return msr - MSR_P4_BSU_ESCR0;
		}
		fallthrough;
	case X86_VENDOR_ZHAOXIN:
	case X86_VENDOR_CENTAUR:
		return msr - MSR_ARCH_PERFMON_EVENTSEL0;
	}
	return 0;
}

/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return !test_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);

int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return 1;

	if (!test_and_set_bit(counter, perfctr_nmi_owner))
		return 1;
	return 0;
}
EXPORT_SYMBOL(reserve_perfctr_nmi);

void release_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return;

	clear_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(release_perfctr_nmi);

int reserve_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return 1;

	if (!test_and_set_bit(counter, evntsel_nmi_owner))
		return 1;
	return 0;
}
EXPORT_SYMBOL(reserve_evntsel_nmi);

void release_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return;

	clear_bit(counter, evntsel_nmi_owner);
}
EXPORT_SYMBOL(release_evntsel_nmi);
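
/*
 * Typical caller pattern, sketched for illustration only (note the
 * convention: the reserve functions return 1 on success and 0 if the
 * register is already taken):
 *
 *	if (!reserve_perfctr_nmi(MSR_ARCH_PERFMON_PERFCTR0))
 *		return -EBUSY;
 *	if (!reserve_evntsel_nmi(MSR_ARCH_PERFMON_EVENTSEL0)) {
 *		release_perfctr_nmi(MSR_ARCH_PERFMON_PERFCTR0);
 *		return -EBUSY;
 *	}
 *	... program and use the counter ...
 *	release_evntsel_nmi(MSR_ARCH_PERFMON_EVENTSEL0);
 *	release_perfctr_nmi(MSR_ARCH_PERFMON_PERFCTR0);
 */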