perf: RISC-V: Add support for SBI PMU and Sscofpmf

commit 6ae1af9ca0

This series improves perf support for RISC-V based systems using the SBI PMU
and Sscofpmf extensions, by adding a new generic RISC-V perf framework along
with a pair of drivers: one that uses the new performance-monitoring
extensions and one that keeps support for existing systems that only have the
legacy counters.

Tested-by: Nikita Shubin <n.shubin@yadro.com>

* palmer/riscv-pmu:
  MAINTAINERS: Add entry for RISC-V PMU drivers
  Documentation: riscv: Remove the old documentation
  RISC-V: Add sscofpmf extension support
  RISC-V: Add perf platform driver based on SBI PMU extension
  RISC-V: Add RISC-V SBI PMU extension definitions
  RISC-V: Add a simple platform driver for RISC-V legacy perf
  RISC-V: Add a perf core library for pmu drivers
  RISC-V: Add CSR encodings for all HPMCOUNTERS
  RISC-V: Remove the current perf implementation
@ -1,255 +0,0 @@
===================================
Supporting PMUs on RISC-V platforms
===================================

Alan Kao <alankao@andestech.com>, Mar 2018

Introduction
------------

As of this writing, the perf_event-related features mentioned in The RISC-V
ISA Privileged Version 1.10 are as follows (please check the manual for more
details):

* [m|s]counteren
* mcycle[h], cycle[h]
* minstret[h], instret[h]
* mhpeventx, mhpcounterx[h]

With only this feature set, porting perf would require a lot of work, due to
the lack of the following general architectural performance monitoring
features:

* Enabling/disabling counters.
  Counters are just free-running all the time in our case.
* Interrupt caused by counter overflow.
  There is no such feature in the spec.
* Interrupt indicator.
  It is not possible to have an interrupt port for every counter, so an
  interrupt indicator is required for software to tell which counter has
  just overflowed.
* Writing to counters.
  There will be an SBI to support this, since the kernel cannot modify the
  counters [1]. Alternatively, some vendors are considering a hardware
  extension for M-S-U model machines to write counters directly.

This document aims to provide developers with a quick guide on supporting
their PMUs in the kernel. The following sections briefly explain perf's
mechanism and the remaining to-dos.

You may check previous discussions here [1][2]. It might also be helpful
to check the appendix for related kernel structures.


1. Initialization
-----------------

*riscv_pmu* is a global pointer of type *struct riscv_pmu*, which contains
various methods according to perf's internal convention, as well as
PMU-specific parameters. One should declare such an instance to represent the
PMU. By default, *riscv_pmu* points to a constant structure *riscv_base_pmu*,
which has very basic support for a baseline QEMU model.

A developer can then either assign that instance's pointer to *riscv_pmu*, so
that the minimal, already-implemented logic can be leveraged, or provide
their own *riscv_init_platform_pmu* implementation.
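
For instance, a minimal vendor port might look like the following sketch (the
*my_*-prefixed names are hypothetical; the fields are those of *struct
riscv_pmu* listed in the appendix, and the function pointers reuse the base
implementation)::

    static const struct riscv_pmu my_vendor_pmu = {
        .pmu             = &min_pmu,    /* reuse the base struct pmu ops */
        .max_events      = ARRAY_SIZE(my_hw_event_map),
        .hw_events       = my_hw_event_map,
        .map_hw_event    = riscv_map_hw_event,
        .map_cache_event = riscv_map_cache_event,
        .num_counters    = 2,
        .counter_width   = 63,
        .irq             = -1,          /* no overflow interrupt */
    };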

In other words, the existing sources of *riscv_base_pmu* merely provide a
reference implementation. Developers can flexibly decide how many parts they
want to leverage, and in the most extreme case, they can customize every
function according to their needs.


2. Event Initialization
-----------------------

When a user launches a perf command to monitor some events, it is first
interpreted by the userspace perf tool into multiple *perf_event_open*
system calls, each of which then invokes the *event_init* member function
that was assigned in the previous step. In *riscv_base_pmu*'s case, it is
*riscv_event_init*.

The main purpose of this function is to translate the event provided by the
user into a bitmap, so that HW-related control registers or counters can be
manipulated directly. The translation is based on the mappings and methods
provided in *riscv_pmu*.

Note that some setup can be done at this stage as well:

(1) interrupt setup, which is described in the next section;
(2) privilege level setting (user space only, kernel space only, both);
(3) destructor setting. Normally it is sufficient to apply *riscv_destroy_event*;
(4) tweaks for non-sampling events, which will be utilized by functions such as
    *perf_adjust_period*, usually something like the following::

	if (!is_sampling_event(event)) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

In the case of *riscv_base_pmu*, only (3) is provided for now.


3. Interrupt
------------

3.1. Interrupt Initialization

This often occurs at the beginning of the *event_init* method. In common
practice, this should be a code segment like::

	int x86_reserve_hardware(void)
	{
		int err = 0;

		if (!atomic_inc_not_zero(&pmc_refcount)) {
			mutex_lock(&pmc_reserve_mutex);
			if (atomic_read(&pmc_refcount) == 0) {
				if (!reserve_pmc_hardware())
					err = -EBUSY;
				else
					reserve_ds_buffers();
			}
			if (!err)
				atomic_inc(&pmc_refcount);
			mutex_unlock(&pmc_reserve_mutex);
		}

		return err;
	}

The magic is in *reserve_pmc_hardware*, which usually performs atomic
operations to make the implemented IRQ accessible from some global function
pointer. *release_pmc_hardware* serves the opposite purpose, and is used in
the event destructors mentioned in the previous section.

(Note: across all architectures' implementations, the *reserve/release* pair
always deals with IRQ settings, so the name *pmc_hardware* is somewhat
misleading. It does NOT deal with the binding between an event and a physical
counter, which is introduced in the next section.)

3.2. IRQ Structure

Basically, an IRQ handler runs the following pseudo code::

	for each hardware counter that triggered this overflow

		get the event of this counter

		// the following two steps are defined as *read()*;
		// check the section Reading/Writing Counters for details.
		count the delta value since the previous interrupt
		update event->count (# of event occurrences) by adding delta, and
		event->hw.period_left by subtracting delta

		if the event overflows
			sample data
			set the counter appropriately for the next overflow

			if the event overflows again
				too frequently, throttle this event
			fi
		fi

	end for

However, as of this writing none of the RISC-V implementations have designed
an interrupt for perf, so the details are to be completed in the future.
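
If such an interrupt ever materializes, the handler skeleton could plausibly
look like the sketch below. This is only an illustration built on the
structures above; *my_counter_has_overflowed* is a hypothetical helper for
whatever overflow-indicator mechanism the hardware provides::

    static irqreturn_t my_pmu_handle_irq(int irq_num, void *dev)
    {
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct pt_regs *regs = get_irq_regs();
        struct perf_sample_data data;
        int i;

        for (i = 0; i < riscv_pmu->num_counters; i++) {
            struct perf_event *event = cpuc->events[i];

            if (!event || !my_counter_has_overflowed(i))
                continue;

            /* read(): update event->count and period_left */
            riscv_pmu->pmu->read(event);

            perf_sample_data_init(&data, 0, event->hw.last_period);
            if (perf_event_overflow(event, &data, regs))
                /* throttled by the perf core: stop the event */
                riscv_pmu->pmu->stop(event, 0);
        }

        return IRQ_HANDLED;
    }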

4. Reading/Writing Counters
---------------------------

They seem symmetric, but perf treats them quite differently. For reading,
there is a *read* interface in *struct pmu*, but it serves more than just
reading. Depending on the context, the *read* function not only reads the
content of the counter (event->count), but also updates the period left until
the next interrupt (event->hw.period_left).
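
As a sketch (this mirrors what the base driver below actually does), the core
of such a *read* implementation is a lock-free delta update::

    do {
        prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = read_counter(hwc->idx);
    } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                             new_raw_count) != prev_raw_count);

    /* mask the delta to the implemented counter width */
    delta = (new_raw_count - prev_raw_count) &
            ((1ULL << riscv_pmu->counter_width) - 1);
    local64_add(delta, &event->count);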

The perf core, however, does not need to write counters directly. Writing
counters is hidden behind two abstractions: 1) *pmu->start* literally starts
counting, so one has to set the counter to a suitable value for the next
interrupt; 2) inside the IRQ handler, the counter should be set to the same
reasonable value.

Reading is not a problem in RISC-V, but writing would need some effort, since
counters are not allowed to be written from S-mode.


5. add()/del()/start()/stop()
-----------------------------

Basic idea: add()/del() adds/deletes events to/from a PMU, and start()/stop()
starts/stops the counter of some event in the PMU. All of them take the same
arguments: *struct perf_event *event* and *int flags*.

If you consider perf as a state machine, then these functions serve as the
transitions between its states. Three states (event->hw.state) are defined:

* PERF_HES_STOPPED: the counter is stopped
* PERF_HES_UPTODATE: the event->count is up-to-date
* PERF_HES_ARCH: arch-dependent usage ... we don't need this for now

A normal flow of these state transitions is as follows:

* A user launches a perf event, resulting in a call to *event_init*.
* When being context-switched in, *add* is called by the perf core, with the
  flag PERF_EF_START, which means that the event should be started after it
  is added. At this stage, a general event is bound to a physical counter, if
  any. The state changes to PERF_HES_STOPPED and PERF_HES_UPTODATE, because
  it is now stopped, and the (software) event count does not need updating.

  - *start* is then called, and the counter is enabled.
    With the flag PERF_EF_RELOAD, it writes an appropriate value to the
    counter (check the previous section for details).
    Nothing is written if the flag does not contain PERF_EF_RELOAD.
    The state is now reset to none, because the event is neither stopped nor
    up-to-date (the counting has already started).

* When being context-switched out, *del* is called. It then checks out all
  the events in the PMU and calls *stop* to update their counts.

  - *stop* is called by *del* and by the perf core with the flag
    PERF_EF_UPDATE, and it often shares the same subroutine as *read*, with
    the same logic. The state changes to PERF_HES_STOPPED and
    PERF_HES_UPTODATE again.

  - Life cycle of these two pairs: *add* and *del* are called repeatedly as
    tasks switch in and out; *start* and *stop* are also called when the perf
    core needs a quick stop-and-start, for instance, when the interrupt
    period is being adjusted.
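
Concretely (a simplified excerpt of the base implementation below), the state
handling in *add* and *stop* boils down to::

    /* add(): bind the counter, mark it stopped and up-to-date,
     * then optionally start it right away */
    hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
    if (flags & PERF_EF_START)
        riscv_pmu->pmu->start(event, PERF_EF_RELOAD);

    /* stop(): mark the counter stopped; fold the count in if asked to */
    hwc->state |= PERF_HES_STOPPED;
    if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
        riscv_pmu->pmu->read(event);
        hwc->state |= PERF_HES_UPTODATE;
    }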

The current implementation is sufficient for now and can easily be extended
with new features in the future.

A. Related Structures
---------------------

* struct pmu: include/linux/perf_event.h
* struct riscv_pmu: arch/riscv/include/asm/perf_event.h

Both structures are designed to be read-only.

*struct pmu* defines some function pointer interfaces, most of which take
*struct perf_event* as their main argument, dealing with perf events
according to perf's internal state machine (check kernel/events/core.c for
details).

*struct riscv_pmu* defines PMU-specific parameters. The naming follows the
convention of all other architectures.

* struct perf_event: include/linux/perf_event.h
* struct hw_perf_event

The generic structure that represents perf events, and the hardware-related
details.

* struct cpu_hw_events: arch/riscv/include/asm/perf_event.h

The structure that holds the status of events; it has two fixed members:
the number of events and the array of the events.

References
----------

[1] https://github.com/riscv/riscv-linux/pull/124

[2] https://groups.google.com/a/groups.riscv.org/forum/#!topic/sw-dev/f19TmCNP6yA
@ -16561,6 +16561,15 @@ S: Maintained
F:	drivers/mtd/nand/raw/r852.c
F:	drivers/mtd/nand/raw/r852.h

RISC-V PMU DRIVERS
M:	Atish Patra <atishp@atishpatra.org>
R:	Anup Patel <anup@brainfault.org>
L:	linux-riscv@lists.infradead.org
S:	Supported
F:	drivers/perf/riscv_pmu.c
F:	drivers/perf/riscv_pmu_legacy.c
F:	drivers/perf/riscv_pmu_sbi.c

RISC-V ARCHITECTURE
M:	Paul Walmsley <paul.walmsley@sifive.com>
M:	Palmer Dabbelt <palmer@dabbelt.com>
@ -333,19 +333,6 @@ config RISCV_ISA_C

	  If you don't know what to do here, say Y.

menu "supported PMU type"
	depends on PERF_EVENTS

config RISCV_BASE_PMU
	bool "Base Performance Monitoring Unit"
	def_bool y
	help
	  A base PMU that serves as a reference implementation and has limited
	  perf features. It can run on any RISC-V machine, so it serves as the
	  fallback, but this option can also be disabled to reduce kernel size.

endmenu

config FPU
	bool "FPU support"
	default y
@ -66,6 +66,7 @@
#define IRQ_S_EXT		9
#define IRQ_VS_EXT		10
#define IRQ_M_EXT		11
#define IRQ_PMU_OVF		13

/* Exception causes */
#define EXC_INST_MISALIGNED	0

@ -151,9 +152,69 @@
#define CSR_CYCLE		0xc00
#define CSR_TIME		0xc01
#define CSR_INSTRET		0xc02
#define CSR_HPMCOUNTER3		0xc03
#define CSR_HPMCOUNTER4		0xc04
#define CSR_HPMCOUNTER5		0xc05
#define CSR_HPMCOUNTER6		0xc06
#define CSR_HPMCOUNTER7		0xc07
#define CSR_HPMCOUNTER8		0xc08
#define CSR_HPMCOUNTER9		0xc09
#define CSR_HPMCOUNTER10	0xc0a
#define CSR_HPMCOUNTER11	0xc0b
#define CSR_HPMCOUNTER12	0xc0c
#define CSR_HPMCOUNTER13	0xc0d
#define CSR_HPMCOUNTER14	0xc0e
#define CSR_HPMCOUNTER15	0xc0f
#define CSR_HPMCOUNTER16	0xc10
#define CSR_HPMCOUNTER17	0xc11
#define CSR_HPMCOUNTER18	0xc12
#define CSR_HPMCOUNTER19	0xc13
#define CSR_HPMCOUNTER20	0xc14
#define CSR_HPMCOUNTER21	0xc15
#define CSR_HPMCOUNTER22	0xc16
#define CSR_HPMCOUNTER23	0xc17
#define CSR_HPMCOUNTER24	0xc18
#define CSR_HPMCOUNTER25	0xc19
#define CSR_HPMCOUNTER26	0xc1a
#define CSR_HPMCOUNTER27	0xc1b
#define CSR_HPMCOUNTER28	0xc1c
#define CSR_HPMCOUNTER29	0xc1d
#define CSR_HPMCOUNTER30	0xc1e
#define CSR_HPMCOUNTER31	0xc1f
#define CSR_CYCLEH		0xc80
#define CSR_TIMEH		0xc81
#define CSR_INSTRETH		0xc82
#define CSR_HPMCOUNTER3H	0xc83
#define CSR_HPMCOUNTER4H	0xc84
#define CSR_HPMCOUNTER5H	0xc85
#define CSR_HPMCOUNTER6H	0xc86
#define CSR_HPMCOUNTER7H	0xc87
#define CSR_HPMCOUNTER8H	0xc88
#define CSR_HPMCOUNTER9H	0xc89
#define CSR_HPMCOUNTER10H	0xc8a
#define CSR_HPMCOUNTER11H	0xc8b
#define CSR_HPMCOUNTER12H	0xc8c
#define CSR_HPMCOUNTER13H	0xc8d
#define CSR_HPMCOUNTER14H	0xc8e
#define CSR_HPMCOUNTER15H	0xc8f
#define CSR_HPMCOUNTER16H	0xc90
#define CSR_HPMCOUNTER17H	0xc91
#define CSR_HPMCOUNTER18H	0xc92
#define CSR_HPMCOUNTER19H	0xc93
#define CSR_HPMCOUNTER20H	0xc94
#define CSR_HPMCOUNTER21H	0xc95
#define CSR_HPMCOUNTER22H	0xc96
#define CSR_HPMCOUNTER23H	0xc97
#define CSR_HPMCOUNTER24H	0xc98
#define CSR_HPMCOUNTER25H	0xc99
#define CSR_HPMCOUNTER26H	0xc9a
#define CSR_HPMCOUNTER27H	0xc9b
#define CSR_HPMCOUNTER28H	0xc9c
#define CSR_HPMCOUNTER29H	0xc9d
#define CSR_HPMCOUNTER30H	0xc9e
#define CSR_HPMCOUNTER31H	0xc9f

#define CSR_SSCOUNTOVF		0xda0

#define CSR_SSTATUS		0x100
#define CSR_SIE			0x104

@ -241,7 +302,10 @@
# define RV_IRQ_SOFT		IRQ_S_SOFT
# define RV_IRQ_TIMER		IRQ_S_TIMER
# define RV_IRQ_EXT		IRQ_S_EXT
#endif /* CONFIG_RISCV_M_MODE */
# define RV_IRQ_PMU		IRQ_PMU_OVF
# define SIP_LCOFIP		(_AC(0x1, UL) << IRQ_PMU_OVF)

#endif /* !CONFIG_RISCV_M_MODE */

/* IE/IP (Supervisor/Machine Interrupt Enable/Pending) flags */
#define IE_SIE			(_AC(0x1, UL) << RV_IRQ_SOFT)
@ -51,6 +51,7 @@ extern unsigned long elf_hwcap;
 * available logical extension id.
 */
enum riscv_isa_ext_id {
	RISCV_ISA_EXT_SSCOFPMF = RISCV_ISA_EXT_BASE,
	RISCV_ISA_EXT_ID_MAX = RISCV_ISA_EXT_MAX,
};
@ -9,77 +9,5 @@
#define _ASM_RISCV_PERF_EVENT_H

#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>

#ifdef CONFIG_RISCV_BASE_PMU
#define RISCV_BASE_COUNTERS	2

/*
 * The RISCV_MAX_COUNTERS parameter should be specified.
 */

#define RISCV_MAX_COUNTERS	2

/*
 * These are the indexes of bits in the counteren register *minus* 1,
 * except for cycle. It would be coherent if it could be directly mapped
 * to the counteren bit definition, but there is a *time* register at
 * counteren[1]. Per-cpu structures are a scarce resource here.
 *
 * According to the spec, an implementation can support counters up to
 * mhpmcounter31, but many high-end processors have at most 6 general
 * PMCs, so we provide definitions up to MHPMCOUNTER8 here.
 */
#define RISCV_PMU_CYCLE		0
#define RISCV_PMU_INSTRET	1
#define RISCV_PMU_MHPMCOUNTER3	2
#define RISCV_PMU_MHPMCOUNTER4	3
#define RISCV_PMU_MHPMCOUNTER5	4
#define RISCV_PMU_MHPMCOUNTER6	5
#define RISCV_PMU_MHPMCOUNTER7	6
#define RISCV_PMU_MHPMCOUNTER8	7

#define RISCV_OP_UNSUPP		(-EOPNOTSUPP)

struct cpu_hw_events {
	/* # of currently enabled events */
	int			n_events;
	/* currently enabled events */
	struct perf_event	*events[RISCV_MAX_COUNTERS];
	/* vendor-defined PMU data */
	void			*platform;
};

struct riscv_pmu {
	struct pmu	*pmu;

	/* generic hw/cache events table */
	const int	*hw_events;
	const int	(*cache_events)[PERF_COUNT_HW_CACHE_MAX]
				       [PERF_COUNT_HW_CACHE_OP_MAX]
				       [PERF_COUNT_HW_CACHE_RESULT_MAX];
	/* methods used to map hw/cache events */
	int		(*map_hw_event)(u64 config);
	int		(*map_cache_event)(u64 config);

	/* max generic hw events in map */
	int		max_events;
	/* number of total counters, 2 (base) + x (general) */
	int		num_counters;
	/* the width of the counter */
	int		counter_width;

	/* vendor-defined PMU features */
	void		*platform;

	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
	int		irq;
};

#endif
#ifdef CONFIG_PERF_EVENTS
#define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs
#endif

#endif /* _ASM_RISCV_PERF_EVENT_H */
@ -29,6 +29,7 @@ enum sbi_ext_id {
	SBI_EXT_RFENCE = 0x52464E43,
	SBI_EXT_HSM = 0x48534D,
	SBI_EXT_SRST = 0x53525354,
	SBI_EXT_PMU = 0x504D55,

	/* Experimental extensions must lie within this range */
	SBI_EXT_EXPERIMENTAL_START = 0x08000000,

@ -95,6 +96,98 @@ enum sbi_srst_reset_reason {
	SBI_SRST_RESET_REASON_SYS_FAILURE,
};

enum sbi_ext_pmu_fid {
	SBI_EXT_PMU_NUM_COUNTERS = 0,
	SBI_EXT_PMU_COUNTER_GET_INFO,
	SBI_EXT_PMU_COUNTER_CFG_MATCH,
	SBI_EXT_PMU_COUNTER_START,
	SBI_EXT_PMU_COUNTER_STOP,
	SBI_EXT_PMU_COUNTER_FW_READ,
};

#define RISCV_PMU_RAW_EVENT_MASK	GENMASK_ULL(55, 0)
#define RISCV_PMU_RAW_EVENT_IDX		0x20000

/* General pmu event codes specified in the SBI PMU extension */
enum sbi_pmu_hw_generic_events_t {
	SBI_PMU_HW_NO_EVENT			= 0,
	SBI_PMU_HW_CPU_CYCLES			= 1,
	SBI_PMU_HW_INSTRUCTIONS			= 2,
	SBI_PMU_HW_CACHE_REFERENCES		= 3,
	SBI_PMU_HW_CACHE_MISSES			= 4,
	SBI_PMU_HW_BRANCH_INSTRUCTIONS		= 5,
	SBI_PMU_HW_BRANCH_MISSES		= 6,
	SBI_PMU_HW_BUS_CYCLES			= 7,
	SBI_PMU_HW_STALLED_CYCLES_FRONTEND	= 8,
	SBI_PMU_HW_STALLED_CYCLES_BACKEND	= 9,
	SBI_PMU_HW_REF_CPU_CYCLES		= 10,

	SBI_PMU_HW_GENERAL_MAX,
};

/*
 * Special "firmware" events provided by the firmware, even if the hardware
 * does not support performance events. These events are encoded as a raw
 * event type in the Linux kernel perf framework.
 */
enum sbi_pmu_fw_generic_events_t {
	SBI_PMU_FW_MISALIGNED_LOAD	= 0,
	SBI_PMU_FW_MISALIGNED_STORE	= 1,
	SBI_PMU_FW_ACCESS_LOAD		= 2,
	SBI_PMU_FW_ACCESS_STORE		= 3,
	SBI_PMU_FW_ILLEGAL_INSN		= 4,
	SBI_PMU_FW_SET_TIMER		= 5,
	SBI_PMU_FW_IPI_SENT		= 6,
	SBI_PMU_FW_IPI_RECVD		= 7,
	SBI_PMU_FW_FENCE_I_SENT		= 8,
	SBI_PMU_FW_FENCE_I_RECVD	= 9,
	SBI_PMU_FW_SFENCE_VMA_SENT	= 10,
	SBI_PMU_FW_SFENCE_VMA_RCVD	= 11,
	SBI_PMU_FW_SFENCE_VMA_ASID_SENT	= 12,
	SBI_PMU_FW_SFENCE_VMA_ASID_RCVD	= 13,

	SBI_PMU_FW_HFENCE_GVMA_SENT	= 14,
	SBI_PMU_FW_HFENCE_GVMA_RCVD	= 15,
	SBI_PMU_FW_HFENCE_GVMA_VMID_SENT = 16,
	SBI_PMU_FW_HFENCE_GVMA_VMID_RCVD = 17,

	SBI_PMU_FW_HFENCE_VVMA_SENT	= 18,
	SBI_PMU_FW_HFENCE_VVMA_RCVD	= 19,
	SBI_PMU_FW_HFENCE_VVMA_ASID_SENT = 20,
	SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD = 21,
	SBI_PMU_FW_MAX,
};

/* SBI PMU event types */
enum sbi_pmu_event_type {
	SBI_PMU_EVENT_TYPE_HW = 0x0,
	SBI_PMU_EVENT_TYPE_CACHE = 0x1,
	SBI_PMU_EVENT_TYPE_RAW = 0x2,
	SBI_PMU_EVENT_TYPE_FW = 0xf,
};

/* SBI PMU counter types */
enum sbi_pmu_ctr_type {
	SBI_PMU_CTR_TYPE_HW = 0x0,
	SBI_PMU_CTR_TYPE_FW,
};

/* Flags defined for the config matching function */
#define SBI_PMU_CFG_FLAG_SKIP_MATCH	(1 << 0)
#define SBI_PMU_CFG_FLAG_CLEAR_VALUE	(1 << 1)
#define SBI_PMU_CFG_FLAG_AUTO_START	(1 << 2)
#define SBI_PMU_CFG_FLAG_SET_VUINH	(1 << 3)
#define SBI_PMU_CFG_FLAG_SET_VSNH	(1 << 4)
#define SBI_PMU_CFG_FLAG_SET_UINH	(1 << 5)
#define SBI_PMU_CFG_FLAG_SET_SINH	(1 << 6)
#define SBI_PMU_CFG_FLAG_SET_MINH	(1 << 7)

/* Flags defined for the counter start function */
#define SBI_PMU_START_FLAG_SET_INIT_VALUE (1 << 0)

/* Flags defined for the counter stop function */
#define SBI_PMU_STOP_FLAG_RESET		(1 << 0)

#define SBI_SPEC_VERSION_DEFAULT	0x1
#define SBI_SPEC_VERSION_MAJOR_SHIFT	24
#define SBI_SPEC_VERSION_MAJOR_MASK	0x7f

@ -108,6 +201,8 @@ enum sbi_srst_reset_reason {
#define SBI_ERR_DENIED		-4
#define SBI_ERR_INVALID_ADDRESS	-5
#define SBI_ERR_ALREADY_AVAILABLE -6
#define SBI_ERR_ALREADY_STARTED	-7
#define SBI_ERR_ALREADY_STOPPED	-8

extern unsigned long sbi_spec_version;
struct sbiret {
@ -51,7 +51,6 @@ obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o
obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o ftrace.o
obj-$(CONFIG_DYNAMIC_FTRACE)	+= mcount-dyn.o

obj-$(CONFIG_RISCV_BASE_PMU)	+= perf_event.o
obj-$(CONFIG_PERF_EVENTS)	+= perf_callchain.o
obj-$(CONFIG_HAVE_PERF_REGS)	+= perf_regs.o
obj-$(CONFIG_RISCV_SBI)		+= sbi.o
@ -87,6 +87,7 @@ int riscv_of_parent_hartid(struct device_node *node)
 * extensions by an underscore.
 */
static struct riscv_isa_ext_data isa_ext_arr[] = {
	__RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF),
	__RISCV_ISA_EXT_DATA("", RISCV_ISA_EXT_MAX),
};

@ -190,6 +190,8 @@ void __init riscv_fill_hwcap(void)
			if (!ext_long) {
				this_hwcap |= isa2hwcap[(unsigned char)(*ext)];
				set_bit(*ext - 'a', this_isa);
			} else {
				SET_ISA_EXT_MAP("sscofpmf", RISCV_ISA_EXT_SSCOFPMF);
			}
#undef SET_ISA_EXT_MAP
		}
@ -1,485 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 * Copyright 2014 Tilera Corporation. All Rights Reserved.
 * Copyright (C) 2018 Andes Technology Corporation
 *
 * Perf_events support for RISC-V platforms.
 *
 * Since the spec. (as of now, Priv-Spec 1.10) does not provide enough
 * functionality for perf events to fully work, this file provides
 * the very basic framework only.
 *
 * For platform porting, please check Documentation/riscv/pmu.rst.
 *
 * The Copyright line includes x86 and tile ones.
 */

#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
#include <linux/atomic.h>
#include <linux/of.h>
#include <asm/perf_event.h>

static const struct riscv_pmu *riscv_pmu __read_mostly;
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

/*
 * Hardware & cache maps and their methods
 */

static const int riscv_hw_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= RISCV_PMU_CYCLE,
	[PERF_COUNT_HW_INSTRUCTIONS]		= RISCV_PMU_INSTRET,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= RISCV_OP_UNSUPP,
	[PERF_COUNT_HW_CACHE_MISSES]		= RISCV_OP_UNSUPP,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= RISCV_OP_UNSUPP,
	[PERF_COUNT_HW_BRANCH_MISSES]		= RISCV_OP_UNSUPP,
	[PERF_COUNT_HW_BUS_CYCLES]		= RISCV_OP_UNSUPP,
};

#define C(x) PERF_COUNT_HW_CACHE_##x
static const int riscv_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
	},
};

static int riscv_map_hw_event(u64 config)
{
	if (config >= riscv_pmu->max_events)
		return -EINVAL;

	return riscv_pmu->hw_events[config];
}

static int riscv_map_cache_decode(u64 config, unsigned int *type,
				  unsigned int *op, unsigned int *result)
{
	return -ENOENT;
}

static int riscv_map_cache_event(u64 config)
{
	unsigned int type, op, result;
	int err = -ENOENT;
	int code;

	err = riscv_map_cache_decode(config, &type, &op, &result);
	if (!riscv_pmu->cache_events || err)
		return err;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	code = (*riscv_pmu->cache_events)[type][op][result];
	if (code == RISCV_OP_UNSUPP)
		return -EINVAL;

	return code;
}

/*
 * Low-level functions: reading/writing counters
 */

static inline u64 read_counter(int idx)
{
	u64 val = 0;

	switch (idx) {
	case RISCV_PMU_CYCLE:
		val = csr_read(CSR_CYCLE);
		break;
	case RISCV_PMU_INSTRET:
		val = csr_read(CSR_INSTRET);
		break;
	default:
		WARN_ON_ONCE(idx < 0 || idx > RISCV_MAX_COUNTERS);
		return -EINVAL;
	}

	return val;
}

static inline void write_counter(int idx, u64 value)
{
	/* currently not supported */
	WARN_ON_ONCE(1);
}

/*
 * pmu->read: read and update the counter
 *
 * Other architectures' implementations often have a xxx_perf_event_update
 * routine, which can return counter values when called in the IRQ, but
 * return void when being called by the pmu->read method.
 */
static void riscv_pmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;
	u64 oldval;
	int idx = hwc->idx;
	u64 delta;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = read_counter(idx);

		oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count,
					 new_raw_count);
	} while (oldval != prev_raw_count);

	/*
	 * delta is the value to update the counter we maintain in the kernel.
	 */
	delta = (new_raw_count - prev_raw_count) &
		((1ULL << riscv_pmu->counter_width) - 1);
	local64_add(delta, &event->count);
	/*
	 * Something like local64_sub(delta, &hwc->period_left) here is
	 * needed if there is an interrupt for perf.
	 */
}

/*
 * State transition functions:
 *
 * stop()/start() & add()/del()
 */

/*
 * pmu->stop: stop the counter
 */
static void riscv_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		riscv_pmu->pmu->read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

/*
 * pmu->start: start the event.
 */
static void riscv_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

		/*
		 * Set the counter to the period until the next interrupt
		 * here, if you have one.
		 */
	}

	hwc->state = 0;
	perf_event_update_userpage(event);

	/*
	 * Since we cannot write to counters, this serves as an initialization
	 * of the delta-mechanism in pmu->read(); otherwise, the delta would be
	 * wrong when pmu->read is called for the first time.
	 */
	local64_set(&hwc->prev_count, read_counter(hwc->idx));
}

/*
 * pmu->add: add the event to the PMU.
 */
static int riscv_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (cpuc->n_events == riscv_pmu->num_counters)
		return -ENOSPC;

	/*
	 * We don't have general counters, so there is no
	 * binding-event-to-counter process here.
	 *
	 * Indexing using hwc->config generally does not work, since config
	 * may contain extra information, but here the only info we have in
	 * hwc->config is the event index.
	 */
	hwc->idx = hwc->config;
	cpuc->events[hwc->idx] = event;
	cpuc->n_events++;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		riscv_pmu->pmu->start(event, PERF_EF_RELOAD);

	return 0;
}

/*
 * pmu->del: delete the event from the PMU.
 */
static void riscv_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	cpuc->events[hwc->idx] = NULL;
	cpuc->n_events--;
	riscv_pmu->pmu->stop(event, PERF_EF_UPDATE);
	perf_event_update_userpage(event);
}

/*
 * Interrupt: a skeleton for reference.
 */

static DEFINE_MUTEX(pmc_reserve_mutex);

static irqreturn_t riscv_base_pmu_handle_irq(int irq_num, void *dev)
{
	return IRQ_NONE;
}

static int reserve_pmc_hardware(void)
{
	int err = 0;

	mutex_lock(&pmc_reserve_mutex);
	if (riscv_pmu->irq >= 0 && riscv_pmu->handle_irq) {
		err = request_irq(riscv_pmu->irq, riscv_pmu->handle_irq,
				  IRQF_PERCPU, "riscv-base-perf", NULL);
	}
	mutex_unlock(&pmc_reserve_mutex);

	return err;
}

static void release_pmc_hardware(void)
{
	mutex_lock(&pmc_reserve_mutex);
	if (riscv_pmu->irq >= 0)
		free_irq(riscv_pmu->irq, NULL);
	mutex_unlock(&pmc_reserve_mutex);
}

/*
 * Event Initialization/Finalization
 */

static atomic_t riscv_active_events = ATOMIC_INIT(0);

static void riscv_event_destroy(struct perf_event *event)
{
	if (atomic_dec_return(&riscv_active_events) == 0)
		release_pmc_hardware();
}

static int riscv_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	int err;
	int code;

	if (atomic_inc_return(&riscv_active_events) == 1) {
		err = reserve_pmc_hardware();

		if (err) {
			pr_warn("PMC hardware not available\n");
			atomic_dec(&riscv_active_events);
			return -EBUSY;
		}
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		code = riscv_pmu->map_hw_event(attr->config);
		break;
	case PERF_TYPE_HW_CACHE:
		code = riscv_pmu->map_cache_event(attr->config);
		break;
	case PERF_TYPE_RAW:
		return -EOPNOTSUPP;
	default:
		return -ENOENT;
	}

	event->destroy = riscv_event_destroy;
	if (code < 0) {
		event->destroy(event);
		return code;
	}

	/*
	 * idx is set to -1 because the index of a general event should not be
	 * decided until binding to some counter in pmu->add().
	 *
	 * But since we don't have such support, later in pmu->add(), we just
	 * use hwc->config as the index instead.
	 */
	hwc->config = code;
	hwc->idx = -1;

	return 0;
}

/*
 * Initialization
 */

static struct pmu min_pmu = {
	.name		= "riscv-base",
	.event_init	= riscv_event_init,
	.add		= riscv_pmu_add,
	.del		= riscv_pmu_del,
	.start		= riscv_pmu_start,
	.stop		= riscv_pmu_stop,
	.read		= riscv_pmu_read,
};

static const struct riscv_pmu riscv_base_pmu = {
	.pmu = &min_pmu,
	.max_events = ARRAY_SIZE(riscv_hw_event_map),
	.map_hw_event = riscv_map_hw_event,
	.hw_events = riscv_hw_event_map,
	.map_cache_event = riscv_map_cache_event,
	.cache_events = &riscv_cache_event_map,
	.counter_width = 63,
	.num_counters = RISCV_BASE_COUNTERS + 0,
	.handle_irq = &riscv_base_pmu_handle_irq,

	/* This means this PMU has no IRQ. */
	.irq = -1,
};

static const struct of_device_id riscv_pmu_of_ids[] = {
	{.compatible = "riscv,base-pmu", .data = &riscv_base_pmu},
	{ /* sentinel value */ }
};

static int __init init_hw_perf_events(void)
{
	struct device_node *node = of_find_node_by_type(NULL, "pmu");
	const struct of_device_id *of_id;

	riscv_pmu = &riscv_base_pmu;

	if (node) {
		of_id = of_match_node(riscv_pmu_of_ids, node);

		if (of_id)
			riscv_pmu = of_id->data;
		of_node_put(node);
	}

	perf_pmu_register(riscv_pmu->pmu, "cpu", PERF_TYPE_RAW);
	return 0;
}
arch_initcall(init_hw_perf_events);
@ -56,6 +56,36 @@ config ARM_PMU
	  Say y if you want to use CPU performance monitors on ARM-based
	  systems.

config RISCV_PMU
	depends on RISCV
	bool "RISC-V PMU framework"
	default y
	help
	  Say y if you want to use CPU performance monitors on RISC-V based
	  systems. This provides the core PMU framework that abstracts common
	  PMU functionality in a core library so that different PMU drivers
	  can reuse it.

config RISCV_PMU_LEGACY
	depends on RISCV_PMU
	bool "RISC-V legacy PMU implementation"
	default y
	help
	  Say y if you want to use the legacy CPU performance monitor
	  implementation on RISC-V based systems. This only allows counting
	  the cycle and instruction counters, and supports neither counter
	  overflow nor programmable counters. It will be removed in the future.

config RISCV_PMU_SBI
	depends on RISCV_PMU && RISCV_SBI
	bool "RISC-V PMU based on SBI PMU extension"
	default y
	help
	  Say y if you want to use the CPU performance monitor using the SBI
	  PMU extension on RISC-V based systems. This option provides full
	  perf feature support, i.e. counter overflow, privilege mode
	  filtering, and counter configuration.

config ARM_PMU_ACPI
	depends on ARM_PMU && ACPI
	def_bool y
@ -10,6 +10,9 @@ obj-$(CONFIG_FSL_IMX8_DDR_PMU) += fsl_imx8_ddr_perf.o
obj-$(CONFIG_HISI_PMU) += hisilicon/
obj-$(CONFIG_QCOM_L2_PMU)	+= qcom_l2_pmu.o
obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
obj-$(CONFIG_RISCV_PMU) += riscv_pmu.o
obj-$(CONFIG_RISCV_PMU_LEGACY) += riscv_pmu_legacy.o
obj-$(CONFIG_RISCV_PMU_SBI) += riscv_pmu_sbi.o
obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o
obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
drivers/perf/riscv_pmu.c (new file, 324 lines)
@ -0,0 +1,324 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * RISC-V performance counter support.
 *
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * This implementation is based on old RISC-V perf and ARM perf event code
 * which are in turn based on sparc64 and x86 code.
 */

#include <linux/cpumask.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/perf/riscv_pmu.h>
#include <linux/printk.h>
#include <linux/smp.h>

#include <asm/sbi.h>

static unsigned long csr_read_num(int csr_num)
{
#define switchcase_csr_read(__csr_num, __val)		{\
	case __csr_num:					\
		__val = csr_read(__csr_num);		\
		break; }
#define switchcase_csr_read_2(__csr_num, __val)		{\
	switchcase_csr_read(__csr_num + 0, __val)	\
	switchcase_csr_read(__csr_num + 1, __val)}
#define switchcase_csr_read_4(__csr_num, __val)		{\
	switchcase_csr_read_2(__csr_num + 0, __val)	\
	switchcase_csr_read_2(__csr_num + 2, __val)}
#define switchcase_csr_read_8(__csr_num, __val)		{\
	switchcase_csr_read_4(__csr_num + 0, __val)	\
	switchcase_csr_read_4(__csr_num + 4, __val)}
#define switchcase_csr_read_16(__csr_num, __val)	{\
	switchcase_csr_read_8(__csr_num + 0, __val)	\
	switchcase_csr_read_8(__csr_num + 8, __val)}
#define switchcase_csr_read_32(__csr_num, __val)	{\
	switchcase_csr_read_16(__csr_num + 0, __val)	\
	switchcase_csr_read_16(__csr_num + 16, __val)}

	unsigned long ret = 0;

	switch (csr_num) {
	switchcase_csr_read_32(CSR_CYCLE, ret)
	switchcase_csr_read_32(CSR_CYCLEH, ret)
	default:
		break;
	}

	return ret;
#undef switchcase_csr_read_32
#undef switchcase_csr_read_16
#undef switchcase_csr_read_8
#undef switchcase_csr_read_4
#undef switchcase_csr_read_2
#undef switchcase_csr_read
}

/*
 * Read the CSR of a corresponding counter.
 */
unsigned long riscv_pmu_ctr_read_csr(unsigned long csr)
{
	if (csr < CSR_CYCLE || csr > CSR_HPMCOUNTER31H ||
	    (csr > CSR_HPMCOUNTER31 && csr < CSR_CYCLEH)) {
		pr_err("Invalid performance counter csr %lx\n", csr);
		return -EINVAL;
	}

	return csr_read_num(csr);
}

u64 riscv_pmu_ctr_get_width_mask(struct perf_event *event)
{
	int cwidth;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	if (!rvpmu->ctr_get_width)
		/*
		 * If the pmu driver doesn't support counter width, set it to
		 * the default maximum allowed by the specification.
		 */
		cwidth = 63;
	else {
		if (hwc->idx == -1)
			/* Handle init case where idx is not initialized yet */
			cwidth = rvpmu->ctr_get_width(0);
		else
			cwidth = rvpmu->ctr_get_width(hwc->idx);
	}

	return GENMASK_ULL(cwidth, 0);
}

u64 riscv_pmu_event_update(struct perf_event *event)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;
	unsigned long cmask;
	u64 oldval, delta;

	if (!rvpmu->ctr_read)
		return 0;

	cmask = riscv_pmu_ctr_get_width_mask(event);

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = rvpmu->ctr_read(event);
		oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count,
					 new_raw_count);
	} while (oldval != prev_raw_count);

	delta = (new_raw_count - prev_raw_count) & cmask;
	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return delta;
}

static void riscv_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);

	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);

	if (!(hwc->state & PERF_HES_STOPPED)) {
		if (rvpmu->ctr_stop) {
			rvpmu->ctr_stop(event, 0);
			hwc->state |= PERF_HES_STOPPED;
		}
		riscv_pmu_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

int riscv_pmu_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int overflow = 0;
	uint64_t max_period = riscv_pmu_ctr_get_width_mask(event);

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (max_period >> 1))
		left = (max_period >> 1);

	local64_set(&hwc->prev_count, (u64)-left);
	perf_event_update_userpage(event);

	return overflow;
}

static void riscv_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	uint64_t max_period = riscv_pmu_ctr_get_width_mask(event);
	u64 init_val;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	hwc->state = 0;
	riscv_pmu_event_set_period(event);
	init_val = local64_read(&hwc->prev_count) & max_period;
	rvpmu->ctr_start(event, init_val);
	perf_event_update_userpage(event);
}

static int riscv_pmu_add(struct perf_event *event, int flags)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	idx = rvpmu->ctr_get_idx(event);
	if (idx < 0)
		return idx;

	hwc->idx = idx;
	cpuc->events[idx] = event;
	cpuc->n_events++;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		riscv_pmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static void riscv_pmu_del(struct perf_event *event, int flags)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;

	riscv_pmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[hwc->idx] = NULL;
	/* The firmware needs to reset the counter mapping */
	if (rvpmu->ctr_stop)
		rvpmu->ctr_stop(event, RISCV_PMU_STOP_FLAG_RESET);
	cpuc->n_events--;
	if (rvpmu->ctr_clear_idx)
		rvpmu->ctr_clear_idx(event);
	perf_event_update_userpage(event);
	hwc->idx = -1;
}

static void riscv_pmu_read(struct perf_event *event)
{
	riscv_pmu_event_update(event);
}

static int riscv_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	int mapped_event;
	u64 event_config = 0;
	uint64_t cmask;

	hwc->flags = 0;
	mapped_event = rvpmu->event_map(event, &event_config);
	if (mapped_event < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapped_event;
	}

	/*
	 * idx is set to -1 because the index of a general event should not be
	 * decided until binding to some counter in pmu->add().
	 * config will contain the information about the counter CSR;
	 * idx will contain the counter index.
	 */
	hwc->config = event_config;
	hwc->idx = -1;
	hwc->event_base = mapped_event;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		cmask = riscv_pmu_ctr_get_width_mask(event);
		hwc->sample_period = cmask >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	return 0;
}

struct riscv_pmu *riscv_pmu_alloc(void)
{
	struct riscv_pmu *pmu;
	int cpuid, i;
	struct cpu_hw_events *cpuc;

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		goto out;

	pmu->hw_events = alloc_percpu_gfp(struct cpu_hw_events, GFP_KERNEL);
	if (!pmu->hw_events) {
		pr_info("failed to allocate per-cpu PMU data.\n");
		goto out_free_pmu;
	}

	for_each_possible_cpu(cpuid) {
		cpuc = per_cpu_ptr(pmu->hw_events, cpuid);
		cpuc->n_events = 0;
		for (i = 0; i < RISCV_MAX_COUNTERS; i++)
			cpuc->events[i] = NULL;
	}
	pmu->pmu = (struct pmu) {
		.event_init	= riscv_pmu_event_init,
		.add		= riscv_pmu_add,
		.del		= riscv_pmu_del,
		.start		= riscv_pmu_start,
		.stop		= riscv_pmu_stop,
		.read		= riscv_pmu_read,
	};

	return pmu;

out_free_pmu:
	kfree(pmu);
out:
	return NULL;
}
drivers/perf/riscv_pmu_legacy.c (new file, 142 lines)
@ -0,0 +1,142 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * RISC-V performance counter support.
 *
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * This implementation is based on old RISC-V perf and ARM perf event code
 * which are in turn based on sparc64 and x86 code.
 */

#include <linux/mod_devicetable.h>
#include <linux/perf/riscv_pmu.h>
#include <linux/platform_device.h>

#define RISCV_PMU_LEGACY_CYCLE		0
#define RISCV_PMU_LEGACY_INSTRET	1
#define RISCV_PMU_LEGACY_NUM_CTR	2

static bool pmu_init_done;

static int pmu_legacy_ctr_get_idx(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;

	if (event->attr.type != PERF_TYPE_HARDWARE)
		return -EOPNOTSUPP;
	if (attr->config == PERF_COUNT_HW_CPU_CYCLES)
		return RISCV_PMU_LEGACY_CYCLE;
	else if (attr->config == PERF_COUNT_HW_INSTRUCTIONS)
		return RISCV_PMU_LEGACY_INSTRET;
	else
		return -EOPNOTSUPP;
}

/* For the legacy config, the config & counter index are the same */
static int pmu_legacy_event_map(struct perf_event *event, u64 *config)
{
	return pmu_legacy_ctr_get_idx(event);
}

static u64 pmu_legacy_read_ctr(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u64 val;

	if (idx == RISCV_PMU_LEGACY_CYCLE) {
		val = riscv_pmu_ctr_read_csr(CSR_CYCLE);
		if (IS_ENABLED(CONFIG_32BIT))
			val = (u64)riscv_pmu_ctr_read_csr(CSR_CYCLEH) << 32 | val;
	} else if (idx == RISCV_PMU_LEGACY_INSTRET) {
		val = riscv_pmu_ctr_read_csr(CSR_INSTRET);
		if (IS_ENABLED(CONFIG_32BIT))
			val = ((u64)riscv_pmu_ctr_read_csr(CSR_INSTRETH)) << 32 | val;
	} else
		return 0;

	return val;
}

static void pmu_legacy_ctr_start(struct perf_event *event, u64 ival)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 initial_val = pmu_legacy_read_ctr(event);

	/*
	 * The legacy method doesn't really have a start/stop method.
	 * It also cannot update the counter with an initial value.
	 * But we still need to set the prev_count so that read() can compute
	 * the delta. Just use the current counter value to set the prev_count.
	 */
	local64_set(&hwc->prev_count, initial_val);
}

/*
 * This is just a simple implementation to make legacy implementations
 * compatible with the new RISC-V PMU driver framework.
 * This driver only allows reading two counters, i.e. CYCLE & INSTRET.
 * However, it cannot start or stop the counter. Thus, it is not very
 * useful and will be removed in the future.
 */
static void pmu_legacy_init(struct riscv_pmu *pmu)
{
	pr_info("Legacy PMU implementation is available\n");

	pmu->num_counters = RISCV_PMU_LEGACY_NUM_CTR;
	pmu->ctr_start = pmu_legacy_ctr_start;
	pmu->ctr_stop = NULL;
	pmu->event_map = pmu_legacy_event_map;
	pmu->ctr_get_idx = pmu_legacy_ctr_get_idx;
	pmu->ctr_get_width = NULL;
	pmu->ctr_clear_idx = NULL;
	pmu->ctr_read = pmu_legacy_read_ctr;

	perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
}

static int pmu_legacy_device_probe(struct platform_device *pdev)
{
	struct riscv_pmu *pmu = NULL;

	pmu = riscv_pmu_alloc();
	if (!pmu)
		return -ENOMEM;
	pmu_legacy_init(pmu);

	return 0;
}

static struct platform_driver pmu_legacy_driver = {
	.probe		= pmu_legacy_device_probe,
	.driver		= {
		.name	= RISCV_PMU_LEGACY_PDEV_NAME,
	},
};

static int __init riscv_pmu_legacy_devinit(void)
{
	int ret;
	struct platform_device *pdev;

	if (likely(pmu_init_done))
		return 0;

	ret = platform_driver_register(&pmu_legacy_driver);
	if (ret)
		return ret;

	pdev = platform_device_register_simple(RISCV_PMU_LEGACY_PDEV_NAME, -1, NULL, 0);
	if (IS_ERR(pdev)) {
		platform_driver_unregister(&pmu_legacy_driver);
		return PTR_ERR(pdev);
	}

	return ret;
}
late_initcall(riscv_pmu_legacy_devinit);

void riscv_pmu_legacy_skip_init(void)
{
	pmu_init_done = true;
}
790
drivers/perf/riscv_pmu_sbi.c
Normal file
790
drivers/perf/riscv_pmu_sbi.c
Normal file
@ -0,0 +1,790 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* RISC-V performance counter support.
|
||||
*
|
||||
* Copyright (C) 2021 Western Digital Corporation or its affiliates.
|
||||
*
|
||||
* This code is based on ARM perf event code which is in turn based on
|
||||
* sparc64 and x86 code.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "riscv-pmu-sbi: " fmt
|
||||
|
||||
#include <linux/mod_devicetable.h>
|
||||
#include <linux/perf/riscv_pmu.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/of.h>
|
||||
|
||||
#include <asm/sbi.h>
|
||||
#include <asm/hwcap.h>
|
||||
|
||||
union sbi_pmu_ctr_info {
|
||||
unsigned long value;
|
||||
struct {
|
||||
unsigned long csr:12;
|
||||
unsigned long width:6;
|
||||
#if __riscv_xlen == 32
|
||||
unsigned long reserved:13;
|
||||
#else
|
||||
unsigned long reserved:45;
|
||||
#endif
|
||||
unsigned long type:1;
|
||||
};
|
||||
};
|
||||
|
||||
/*
 * RISC-V doesn't have heterogeneous harts yet. This needs to be part of
 * the per-CPU data in case harts come with different pmu counters.
 */
static union sbi_pmu_ctr_info *pmu_ctr_list;
static unsigned int riscv_pmu_irq;

struct sbi_pmu_event_data {
	union {
		union {
			struct hw_gen_event {
				uint32_t event_code:16;
				uint32_t event_type:4;
				uint32_t reserved:12;
			} hw_gen_event;
			struct hw_cache_event {
				uint32_t result_id:1;
				uint32_t op_id:2;
				uint32_t cache_id:13;
				uint32_t event_type:4;
				uint32_t reserved:12;
			} hw_cache_event;
		};
		uint32_t event_idx;
	};
};

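/*
 * Illustration (derived from the bitfields above): event_idx overlays the
 * packed encodings, so a generic hardware event becomes
 *
 *	event_idx = (SBI_PMU_EVENT_TYPE_HW << 16) | event_code
 *
 * while a cache event packs result_id, op_id and cache_id into bits [0],
 * [2:1] and [15:3] below the same 4-bit type field.
 */
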
static const struct sbi_pmu_event_data pmu_hw_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = {.hw_gen_event = {
		SBI_PMU_HW_CPU_CYCLES,
		SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_INSTRUCTIONS] = {.hw_gen_event = {
		SBI_PMU_HW_INSTRUCTIONS,
		SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_CACHE_REFERENCES] = {.hw_gen_event = {
		SBI_PMU_HW_CACHE_REFERENCES,
		SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_CACHE_MISSES] = {.hw_gen_event = {
		SBI_PMU_HW_CACHE_MISSES,
		SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {.hw_gen_event = {
		SBI_PMU_HW_BRANCH_INSTRUCTIONS,
		SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BRANCH_MISSES] = {.hw_gen_event = {
		SBI_PMU_HW_BRANCH_MISSES,
		SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BUS_CYCLES] = {.hw_gen_event = {
		SBI_PMU_HW_BUS_CYCLES,
		SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {.hw_gen_event = {
		SBI_PMU_HW_STALLED_CYCLES_FRONTEND,
		SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {.hw_gen_event = {
		SBI_PMU_HW_STALLED_CYCLES_BACKEND,
		SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_REF_CPU_CYCLES] = {.hw_gen_event = {
		SBI_PMU_HW_REF_CPU_CYCLES,
		SBI_PMU_EVENT_TYPE_HW, 0}},
};

#define C(x) PERF_COUNT_HW_CACHE_##x
static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
				C(OP_READ), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
				C(OP_READ), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
				C(OP_WRITE), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
				C(OP_WRITE), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
				C(OP_PREFETCH), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
				C(OP_PREFETCH), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
				C(OP_READ), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
				C(OP_READ), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
				C(OP_WRITE), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
				C(OP_WRITE), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
				C(OP_PREFETCH), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
				C(OP_PREFETCH), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
				C(OP_READ), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
				C(OP_READ), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
				C(OP_WRITE), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
				C(OP_WRITE), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
				C(OP_PREFETCH), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
				C(OP_PREFETCH), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
				C(OP_READ), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
				C(OP_READ), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
				C(OP_WRITE), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
				C(OP_WRITE), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
				C(OP_PREFETCH), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
				C(OP_PREFETCH), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
				C(OP_READ), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
				C(OP_READ), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
				C(OP_WRITE), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
				C(OP_WRITE), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
				C(OP_PREFETCH), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
				C(OP_PREFETCH), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
				C(OP_READ), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
				C(OP_READ), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
				C(OP_WRITE), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
				C(OP_WRITE), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
				C(OP_PREFETCH), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
				C(OP_PREFETCH), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
				C(OP_READ), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
				C(OP_READ), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
				C(OP_WRITE), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
				C(OP_WRITE), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
				C(OP_PREFETCH), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
				C(OP_PREFETCH), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
};

static int pmu_sbi_ctr_get_width(int idx)
{
	return pmu_ctr_list[idx].width;
}

static bool pmu_sbi_ctr_is_fw(int cidx)
{
	/* &pmu_ctr_list[cidx] is never NULL, so just check the counter type */
	return pmu_ctr_list[cidx].type == SBI_PMU_CTR_TYPE_FW;
}

static int pmu_sbi_ctr_get_idx(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	struct sbiret ret;
	int idx;
	uint64_t cbase = 0;
	uint64_t cmask = GENMASK_ULL(rvpmu->num_counters - 1, 0);
	unsigned long cflags = 0;

	if (event->attr.exclude_kernel)
		cflags |= SBI_PMU_CFG_FLAG_SET_SINH;
	if (event->attr.exclude_user)
		cflags |= SBI_PMU_CFG_FLAG_SET_UINH;

	/* Retrieve the available counter index */
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
			cflags, hwc->event_base, hwc->config, 0);
	if (ret.error) {
		pr_debug("Not able to find a counter for event %lx config %llx\n",
			 hwc->event_base, hwc->config);
		return sbi_err_map_linux_errno(ret.error);
	}

	idx = ret.value;
	if (idx >= rvpmu->num_counters || !pmu_ctr_list[idx].value)
		return -ENOENT;

	/* Additional sanity check for the counter id */
	if (pmu_sbi_ctr_is_fw(idx)) {
		if (!test_and_set_bit(idx, cpuc->used_fw_ctrs))
			return idx;
	} else {
		if (!test_and_set_bit(idx, cpuc->used_hw_ctrs))
			return idx;
	}

	return -ENOENT;
}

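/*
 * Sketch of the counter_cfg_match semantics relied on above (per the SBI
 * PMU spec): the call takes a base counter index, a bitmask of candidate
 * counters relative to that base, config/filter flags and the event
 * encoding; on success, ret.value holds the counter index chosen by the
 * SBI implementation.
 */
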
static void pmu_sbi_ctr_clear_idx(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	int idx = hwc->idx;

	if (pmu_sbi_ctr_is_fw(idx))
		clear_bit(idx, cpuc->used_fw_ctrs);
	else
		clear_bit(idx, cpuc->used_hw_ctrs);
}

static int pmu_event_find_cache(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	int ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = pmu_cache_event_map[cache_type][cache_op][cache_result].event_idx;

	return ret;
}

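/*
 * Worked example (standard perf ABI encoding): an L1D read-miss event is
 * requested as config = C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16),
 * which the decoding above turns into indices into pmu_cache_event_map.
 */
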
static bool pmu_sbi_is_fw_event(struct perf_event *event)
{
	u32 type = event->attr.type;
	u64 config = event->attr.config;

	return (type == PERF_TYPE_RAW) && ((config >> 63) == 1);
}

static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
{
	u32 type = event->attr.type;
	u64 config = event->attr.config;
	bool fw_event;
	u64 raw_config_val;
	int ret;

	switch (type) {
	case PERF_TYPE_HARDWARE:
		if (config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		ret = pmu_hw_event_map[config].event_idx;
		break;
	case PERF_TYPE_HW_CACHE:
		ret = pmu_event_find_cache(config);
		break;
	case PERF_TYPE_RAW:
		/*
		 * As per the SBI specification, the upper 16 bits must be unused
		 * for a raw event. Use the MSB (bit 63) to distinguish between
		 * hardware raw events and firmware events.
		 */
		fw_event = config >> 63;
		raw_config_val = config & RISCV_PMU_RAW_EVENT_MASK;
		if (fw_event) {
			if (raw_config_val < SBI_PMU_FW_MAX)
				ret = (raw_config_val & 0xFFFF) |
				      (SBI_PMU_EVENT_TYPE_FW << 16);
			else
				return -EINVAL;
		} else {
			ret = RISCV_PMU_RAW_EVENT_IDX;
			*econfig = raw_config_val;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

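/*
 * Illustration (assuming the encoding above): a plain hardware raw event
 * can be requested with perf's raw notation, e.g. `perf stat -e rNN`,
 * while a config with bit 63 set, i.e. (1ULL << 63) | fw_event_id,
 * selects a firmware event instead.
 */
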
static u64 pmu_sbi_ctr_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct sbiret ret;
	union sbi_pmu_ctr_info info;
	u64 val = 0;

	if (pmu_sbi_is_fw_event(event)) {
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ,
				hwc->idx, 0, 0, 0, 0, 0);
		if (!ret.error)
			val = ret.value;
	} else {
		info = pmu_ctr_list[idx];
		val = riscv_pmu_ctr_read_csr(info.csr);
		if (IS_ENABLED(CONFIG_32BIT))
			/*
			 * The high half lives at csr + 0x80 (e.g. cycleh) and
			 * holds bits 63:32, so shift by 32, not 31.
			 */
			val = ((u64)riscv_pmu_ctr_read_csr(info.csr + 0x80)) << 32 | val;
	}

	return val;
}

static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
{
	struct sbiret ret;
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
			1, flag, ival, ival >> 32, 0);
	if (ret.error && (ret.error != SBI_ERR_ALREADY_STARTED))
		pr_err("Starting counter idx %d failed with error %d\n",
		       hwc->idx, sbi_err_map_linux_errno(ret.error));
}

static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
{
	struct sbiret ret;
	struct hw_perf_event *hwc = &event->hw;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0);
	if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) &&
	    flag != SBI_PMU_STOP_FLAG_RESET)
		pr_err("Stopping counter idx %d failed with error %d\n",
		       hwc->idx, sbi_err_map_linux_errno(ret.error));
}

static int pmu_sbi_find_num_ctrs(void)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_NUM_COUNTERS, 0, 0, 0, 0, 0, 0);
	if (!ret.error)
		return ret.value;
	else
		return sbi_err_map_linux_errno(ret.error);
}

static int pmu_sbi_get_ctrinfo(int nctr)
{
	struct sbiret ret;
	int i, num_hw_ctr = 0, num_fw_ctr = 0;
	union sbi_pmu_ctr_info cinfo;

	pmu_ctr_list = kcalloc(nctr, sizeof(*pmu_ctr_list), GFP_KERNEL);
	if (!pmu_ctr_list)
		return -ENOMEM;

	/* Iterate with i < nctr: pmu_ctr_list only has nctr entries */
	for (i = 0; i < nctr; i++) {
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0);
		if (ret.error)
			/* The logical counter ids are not expected to be contiguous */
			continue;
		cinfo.value = ret.value;
		if (cinfo.type == SBI_PMU_CTR_TYPE_FW)
			num_fw_ctr++;
		else
			num_hw_ctr++;
		pmu_ctr_list[i].value = cinfo.value;
	}

	pr_info("%d firmware and %d hardware counters\n", num_fw_ctr, num_hw_ctr);

	return 0;
}

static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
{
	/*
	 * No need to check the error because we are disabling all the counters,
	 * which may include counters that are not enabled yet.
	 */
	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
		  0, GENMASK_ULL(pmu->num_counters - 1, 0), 0, 0, 0, 0);
}

static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
{
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);

	/* No need to check the error here as we can't do anything about it */
	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, 0,
		  cpu_hw_evt->used_hw_ctrs[0], 0, 0, 0, 0);
}

/*
 * This function starts all the used counters in a two-step approach.
 * Any counter that did not overflow can be started in a single batch call,
 * while the overflowed counters need to be started individually with an
 * updated initialization value.
 */
static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
					       unsigned long ctr_ovf_mask)
{
	int idx = 0;
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
	struct perf_event *event;
	unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;
	unsigned long ctr_start_mask = 0;
	uint64_t max_period;
	struct hw_perf_event *hwc;
	u64 init_val = 0;

	ctr_start_mask = cpu_hw_evt->used_hw_ctrs[0] & ~ctr_ovf_mask;

	/* Start all the counters that did not overflow in a single shot */
	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, 0, ctr_start_mask,
		  0, 0, 0, 0);

	/* Reinitialize and start all the counters that overflowed */
	while (ctr_ovf_mask) {
		if (ctr_ovf_mask & 0x01) {
			event = cpu_hw_evt->events[idx];
			hwc = &event->hw;
			max_period = riscv_pmu_ctr_get_width_mask(event);
			init_val = local64_read(&hwc->prev_count) & max_period;
			sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
				  flag, init_val, 0, 0);
		}
		ctr_ovf_mask = ctr_ovf_mask >> 1;
		idx++;
	}
}

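/*
 * Example (illustrative): with used_hw_ctrs[0] = 0b1101 and
 * ctr_ovf_mask = 0b0100, counters 0 and 3 are restarted by the single
 * batch call while counter 2 is restarted individually with its reload
 * value.
 */
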
static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
{
	struct perf_sample_data data;
	struct pt_regs *regs;
	struct hw_perf_event *hw_evt;
	union sbi_pmu_ctr_info *info;
	int lidx, hidx, fidx;
	struct riscv_pmu *pmu;
	struct perf_event *event;
	unsigned long overflow;
	unsigned long overflowed_ctrs = 0;
	struct cpu_hw_events *cpu_hw_evt = dev;

	if (WARN_ON_ONCE(!cpu_hw_evt))
		return IRQ_NONE;

	/* Firmware counters don't support overflow yet */
	fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS);
	event = cpu_hw_evt->events[fidx];
	if (!event) {
		csr_clear(CSR_SIP, SIP_LCOFIP);
		return IRQ_NONE;
	}

	pmu = to_riscv_pmu(event->pmu);
	pmu_sbi_stop_hw_ctrs(pmu);

	/* The overflow status register should only be read after the counters are stopped */
	overflow = csr_read(CSR_SSCOUNTOVF);

	/*
	 * The overflow interrupt pending bit should only be cleared after
	 * stopping all the counters, to avoid any race condition.
	 */
	csr_clear(CSR_SIP, SIP_LCOFIP);

	/* No overflow bit is set */
	if (!overflow)
		return IRQ_NONE;

	regs = get_irq_regs();

	for_each_set_bit(lidx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) {
		struct perf_event *event = cpu_hw_evt->events[lidx];

		/* Skip if the event is invalid or the user did not request sampling */
		if (!event || !is_sampling_event(event))
			continue;

		info = &pmu_ctr_list[lidx];
		/* Do a sanity check */
		if (!info || info->type != SBI_PMU_CTR_TYPE_HW)
			continue;

		/* Compute the hardware counter index */
		hidx = info->csr - CSR_CYCLE;
		/* Check if the corresponding bit is set in sscountovf */
		if (!(overflow & (1 << hidx)))
			continue;

		/*
		 * Keep track of overflowed counters so that they can be started
		 * with an updated initial value.
		 */
		overflowed_ctrs |= 1 << lidx;
		hw_evt = &event->hw;
		riscv_pmu_event_update(event);
		perf_sample_data_init(&data, 0, hw_evt->last_period);
		if (riscv_pmu_event_set_period(event)) {
			/*
			 * Unlike other ISAs, RISC-V doesn't have to disable
			 * interrupts to avoid throttling here. As per the
			 * specification, the interrupt remains disabled until
			 * the OF bit is set. Interrupts are enabled again only
			 * during the start.
			 * TODO: We will need to stop the guest counters once
			 * virtualization support is added.
			 */
			perf_event_overflow(event, &data, regs);
		}
	}
	pmu_sbi_start_overflow_mask(pmu, overflowed_ctrs);

	return IRQ_HANDLED;
}

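/*
 * Mapping example: a counter whose info.csr is CSR_HPMCOUNTER3 (0xC03)
 * yields hidx = 0xC03 - 0xC00 = 3, i.e. bit 3 of the sscountovf CSR,
 * matching the hardware counter numbering in the ISA.
 */
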
static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct riscv_pmu *pmu = hlist_entry_safe(node, struct riscv_pmu, node);
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);

	/* Enable user-mode access to the TIME CSR only, for now (bit 1 = TM) */
	csr_write(CSR_SCOUNTEREN, 0x2);

	/* Stop all the counters so that they can be enabled from perf */
	pmu_sbi_stop_all(pmu);

	if (riscv_isa_extension_available(NULL, SSCOFPMF)) {
		cpu_hw_evt->irq = riscv_pmu_irq;
		csr_clear(CSR_IP, BIT(RV_IRQ_PMU));
		csr_set(CSR_IE, BIT(RV_IRQ_PMU));
		enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE);
	}

	return 0;
}

static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
{
	if (riscv_isa_extension_available(NULL, SSCOFPMF)) {
		disable_percpu_irq(riscv_pmu_irq);
		csr_clear(CSR_IE, BIT(RV_IRQ_PMU));
	}

	/* Disable all counter access for user mode now */
	csr_write(CSR_SCOUNTEREN, 0x0);

	return 0;
}

static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pdev)
{
	int ret;
	struct cpu_hw_events __percpu *hw_events = pmu->hw_events;
	struct device_node *cpu, *child;
	struct irq_domain *domain = NULL;

	if (!riscv_isa_extension_available(NULL, SSCOFPMF))
		return -EOPNOTSUPP;

	for_each_of_cpu_node(cpu) {
		child = of_get_compatible_child(cpu, "riscv,cpu-intc");
		if (!child) {
			pr_err("Failed to find INTC node\n");
			return -ENODEV;
		}
		domain = irq_find_host(child);
		of_node_put(child);
		if (domain)
			break;
	}
	if (!domain) {
		pr_err("Failed to find INTC IRQ root domain\n");
		return -ENODEV;
	}

	riscv_pmu_irq = irq_create_mapping(domain, RV_IRQ_PMU);
	if (!riscv_pmu_irq) {
		pr_err("Failed to map PMU interrupt for node\n");
		return -ENODEV;
	}

	ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_handler, "riscv-pmu", hw_events);
	if (ret) {
		pr_err("registering percpu irq failed [%d]\n", ret);
		return ret;
	}

	return 0;
}

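/*
 * Illustration: the loop above walks every /cpus node looking for a child
 * with compatible = "riscv,cpu-intc" (the per-hart interrupt controller)
 * and maps the PMU overflow interrupt, RV_IRQ_PMU, in the first INTC irq
 * domain it finds.
 */
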
static int pmu_sbi_device_probe(struct platform_device *pdev)
{
	struct riscv_pmu *pmu = NULL;
	int num_counters;
	int ret = -ENODEV;

	pr_info("SBI PMU extension is available\n");
	pmu = riscv_pmu_alloc();
	if (!pmu)
		return -ENOMEM;

	num_counters = pmu_sbi_find_num_ctrs();
	if (num_counters < 0) {
		pr_err("SBI PMU extension doesn't provide any counters\n");
		goto out_free;
	}

	/* Cache all the information about the counters now */
	ret = pmu_sbi_get_ctrinfo(num_counters);
	if (ret)
		goto out_free;

	ret = pmu_sbi_setup_irqs(pmu, pdev);
	if (ret < 0) {
		pr_info("Perf sampling/filtering is not supported as the Sscofpmf extension is not available\n");
		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
	}
	pmu->num_counters = num_counters;
	pmu->ctr_start = pmu_sbi_ctr_start;
	pmu->ctr_stop = pmu_sbi_ctr_stop;
	pmu->event_map = pmu_sbi_event_map;
	pmu->ctr_get_idx = pmu_sbi_ctr_get_idx;
	pmu->ctr_get_width = pmu_sbi_ctr_get_width;
	pmu->ctr_clear_idx = pmu_sbi_ctr_clear_idx;
	pmu->ctr_read = pmu_sbi_ctr_read;

	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
	if (ret)
		return ret;

	ret = perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
	if (ret) {
		cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
		return ret;
	}

	return 0;

out_free:
	kfree(pmu);
	return ret;
}

static struct platform_driver pmu_sbi_driver = {
	.probe = pmu_sbi_device_probe,
	.driver = {
		.name = RISCV_PMU_PDEV_NAME,
	},
};

static int __init pmu_sbi_devinit(void)
{
	int ret;
	struct platform_device *pdev;

	if (sbi_spec_version < sbi_mk_version(0, 3) ||
	    sbi_probe_extension(SBI_EXT_PMU) <= 0) {
		return 0;
	}

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_RISCV_STARTING,
				      "perf/riscv/pmu:starting",
				      pmu_sbi_starting_cpu, pmu_sbi_dying_cpu);
	if (ret) {
		pr_err("CPU hotplug notifier could not be registered: %d\n", ret);
		return ret;
	}

	ret = platform_driver_register(&pmu_sbi_driver);
	if (ret)
		return ret;

	pdev = platform_device_register_simple(RISCV_PMU_PDEV_NAME, -1, NULL, 0);
	if (IS_ERR(pdev)) {
		platform_driver_unregister(&pmu_sbi_driver);
		return PTR_ERR(pdev);
	}

	/* Notify the legacy implementation that an SBI PMU is available */
	riscv_pmu_legacy_skip_init();

	return ret;
}
device_initcall(pmu_sbi_devinit);

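/*
 * Note on ordering (illustrative): device_initcall() runs before the
 * legacy driver's late_initcall(), so riscv_pmu_legacy_skip_init() above
 * sets pmu_init_done before the legacy probe can ever run.
 */
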
include/linux/cpuhotplug.h
@ -165,6 +165,7 @@ enum cpuhp_state {
	CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
	CPUHP_AP_PERF_ARM_ACPI_STARTING,
	CPUHP_AP_PERF_ARM_STARTING,
	CPUHP_AP_PERF_RISCV_STARTING,
	CPUHP_AP_ARM_L2X0_STARTING,
	CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
	CPUHP_AP_ARM_ARCH_TIMER_STARTING,

include/linux/perf/riscv_pmu.h (new file, 75 lines)
@ -0,0 +1,75 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* Copyright (C) 2018 SiFive
|
||||
* Copyright (C) 2018 Andes Technology Corporation
|
||||
* Copyright (C) 2021 Western Digital Corporation or its affiliates.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _ASM_RISCV_PERF_EVENT_H
|
||||
#define _ASM_RISCV_PERF_EVENT_H
|
||||
|
||||
#include <linux/perf_event.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/interrupt.h>
|
||||
|
||||
#ifdef CONFIG_RISCV_PMU
|
||||
|
||||
/*
|
||||
* The RISCV_MAX_COUNTERS parameter should be specified.
|
||||
*/
|
||||
|
||||
#define RISCV_MAX_COUNTERS 64
|
||||
#define RISCV_OP_UNSUPP (-EOPNOTSUPP)
|
||||
#define RISCV_PMU_PDEV_NAME "riscv-pmu"
|
||||
#define RISCV_PMU_LEGACY_PDEV_NAME "riscv-pmu-legacy"
|
||||
|
||||
#define RISCV_PMU_STOP_FLAG_RESET 1
|
||||
|
||||
struct cpu_hw_events {
|
||||
/* currently enabled events */
|
||||
int n_events;
|
||||
/* Counter overflow interrupt */
|
||||
int irq;
|
||||
/* currently enabled events */
|
||||
struct perf_event *events[RISCV_MAX_COUNTERS];
|
||||
/* currently enabled hardware counters */
|
||||
DECLARE_BITMAP(used_hw_ctrs, RISCV_MAX_COUNTERS);
|
||||
/* currently enabled firmware counters */
|
||||
DECLARE_BITMAP(used_fw_ctrs, RISCV_MAX_COUNTERS);
|
||||
};
|
||||
|
||||
struct riscv_pmu {
|
||||
struct pmu pmu;
|
||||
char *name;
|
||||
|
||||
irqreturn_t (*handle_irq)(int irq_num, void *dev);
|
||||
|
||||
int num_counters;
|
||||
u64 (*ctr_read)(struct perf_event *event);
|
||||
int (*ctr_get_idx)(struct perf_event *event);
|
||||
int (*ctr_get_width)(int idx);
|
||||
void (*ctr_clear_idx)(struct perf_event *event);
|
||||
void (*ctr_start)(struct perf_event *event, u64 init_val);
|
||||
void (*ctr_stop)(struct perf_event *event, unsigned long flag);
|
||||
int (*event_map)(struct perf_event *event, u64 *config);
|
||||
|
||||
struct cpu_hw_events __percpu *hw_events;
|
||||
struct hlist_node node;
|
||||
};
|
||||
|
||||
#define to_riscv_pmu(p) (container_of(p, struct riscv_pmu, pmu))
|
||||
unsigned long riscv_pmu_ctr_read_csr(unsigned long csr);
|
||||
int riscv_pmu_event_set_period(struct perf_event *event);
|
||||
uint64_t riscv_pmu_ctr_get_width_mask(struct perf_event *event);
|
||||
u64 riscv_pmu_event_update(struct perf_event *event);
|
||||
#ifdef CONFIG_RISCV_PMU_LEGACY
|
||||
void riscv_pmu_legacy_skip_init(void);
|
||||
#else
|
||||
static inline void riscv_pmu_legacy_skip_init(void) {};
|
||||
#endif
|
||||
struct riscv_pmu *riscv_pmu_alloc(void);
|
||||
|
||||
#endif /* CONFIG_RISCV_PMU */
|
||||
|
||||
#endif /* _ASM_RISCV_PERF_EVENT_H */
|