Merge tag 'perf-core-for-mingo-5.5-20191011' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

perf trace:

  Arnaldo Carvalho de Melo:

  - Reuse the strace-like syscall_arg_fmt->scnprintf() beautification
    routines (which convert integer arguments into strings, like open
    flags, etc) in tracepoint arguments. For now the type-based scnprintf
    routines (pid_t, umode_t, etc) and the ones based on well-known arg
    names ("fd", etc) get associated with tracepoint args of that type.
    A tracepoint-only arg, "msr", for the msr:{write,read}_msr
    tracepoints, gets added as an initial step.

  - Introduce syscall_arg_fmt->strtoul() methods to be the reverse
    operation of ->scnprintf(), i.e. to go from a string to an integer.

  - Implement --filter, just like in 'perf record', that affects the
    tracepoint events specified thus far in the command line; use the
    ->strtoul() methods to map strings in the tables associated with the
    beautifiers to the integers the in-kernel tracepoint (and, later,
    eBPF) filters expect, e.g.:

      # perf trace --max-events 1 -e sched:*ipi --filter="cpu==1 || cpu==2"
         0.000 as/24630 sched:sched_wake_idle_without_ipi(cpu: 1)
      #
      # perf trace --max-events 1 --max-stack=32 -e msr:* --filter="msr==IA32_TSC_DEADLINE"
       207.000 cc1/19963 msr:write_msr(msr: IA32_TSC_DEADLINE, val: 5442316760822)
                           do_trace_write_msr ([kernel.kallsyms])
                           do_trace_write_msr ([kernel.kallsyms])
                           lapic_next_deadline ([kernel.kallsyms])
                           clockevents_program_event ([kernel.kallsyms])
                           hrtimer_interrupt ([kernel.kallsyms])
                           smp_apic_timer_interrupt ([kernel.kallsyms])
                           apic_timer_interrupt ([kernel.kallsyms])
                           [0x6ff66c] (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                           [0x7047c3] (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                           [0x707708] (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                           execute_one_pass (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                           [0x4f3d37] (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                           [0x4f3d49] (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                           execute_pass_list (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                           cgraph_node::expand (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                           [0x2625b4] (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                           symbol_table::finalize_compilation_unit (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                           [0x5ae8b9] (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                           toplev::main (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                           main (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                           [0x26b6a] (/usr/lib/x86_64-linux-gnu/libc-2.29.so)
      #
      # perf trace --max-events 8 -e msr:* --filter="msr==IA32_SPEC_CTRL"
         0.000 :13281/13281 msr:write_msr(msr: IA32_SPEC_CTRL, val: 6)
         0.063 migration/3/25 msr:write_msr(msr: IA32_SPEC_CTRL)
         0.217 kworker/u16:1-/4826 msr:write_msr(msr: IA32_SPEC_CTRL)
         0.687 rcu_sched/11 msr:write_msr(msr: IA32_SPEC_CTRL)
         0.696 :13280/13280 msr:write_msr(msr: IA32_SPEC_CTRL, val: 6)
         0.305 :13281/13281 msr:write_msr(msr: IA32_SPEC_CTRL, val: 6)
         0.355 :13274/13274 msr:write_msr(msr: IA32_SPEC_CTRL, val: 6)
         2.743 kworker/u16:0-/6711 msr:write_msr(msr: IA32_SPEC_CTRL)
      #
      # perf trace --max-events 8 --cpu 1 -e msr:* --filter="msr!=IA32_SPEC_CTRL && msr!=IA32_TSC_DEADLINE && msr != FS_BASE"
         0.000 mtr-packet/30819 msr:write_msr(msr: 0x830, val: 68719479037)
         0.096 :0/0 msr:read_msr(msr: IA32_TSC_ADJUST)
       238.925 mtr-packet/30819 msr:write_msr(msr: 0x830, val: 8589936893)
       511.010 :0/0 msr:write_msr(msr: 0x830, val: 68719479037)
      1005.052 :0/0 msr:read_msr(msr: IA32_TSC_ADJUST)
      1235.131 CPU 0/KVM/3750 msr:write_msr(msr: 0x830, val: 4294969595)
      1235.195 CPU 0/KVM/3750 msr:read_msr(msr: IA32_SYSENTER_ESP, val: -2199023037952)
      1235.201 CPU 0/KVM/3750 msr:read_msr(msr: IA32_APICBASE, val: 4276096000)
      #

  - Default to not using libtraceevent and its plugins for beautifying
    tracepoint arguments, since now we're reusing the strace-like
    beautifiers. Use --libtraceevent_print (just --libtrace is
    unambiguous and can be used as a shorthand) to go back to those
    beautifiers. This will help in the transition, as can be seen in some
    of the sched tracepoints that still need some work in the
    libbeauty-based mode:

      # trace --no-inherit -e msr:*,*sleep,sched:* sleep 1
         0.000 ( ): sched:sched_waking(comm: "trace", pid: 3319 (trace), prio: 120, success: 1)
         0.006 ( ): sched:sched_wakeup(comm: "trace", pid: 3319 (trace), prio: 120, success: 1)
         0.348 ( ): sched:sched_process_exec(filename: 140212596720100, pid: 3319 (sleep), old_pid: 3319 (sleep))
         0.490 ( ): msr:write_msr(msr: FS_BASE, val: 139631189321088)
         0.670 ( ): nanosleep(rqtp: 0x7ffc52c23bc0) ...
         0.674 ( ): sched:sched_stat_runtime(comm: "sleep", pid: 3319 (sleep), runtime: 659259, vruntime: 78942418342)
         0.675 ( ): sched:sched_switch(prev_comm: "sleep", prev_pid: 3319 (sleep), prev_prio: 120, prev_state: 1, next_comm: "swapper/0", next_prio: 120)
      1001.059 ( ): sched:sched_waking(comm: "sleep", pid: 3319 (sleep), prio: 120, success: 1)
      1001.098 ( ): sched:sched_wakeup(comm: "sleep", pid: 3319 (sleep), prio: 120, success: 1)
         0.670 (1000.504 ms): ... [continued]: nanosleep()) = 0
      1001.456 ( ): sched:sched_process_exit(comm: "sleep", pid: 3319 (sleep), prio: 120)
      #
      # trace --libtrace --no-inherit -e msr:*,*sleep,sched:* sleep 1
         0.000 ( ): sched:sched_waking(comm=trace pid=3323 prio=120 target_cpu=000)
         0.007 ( ): sched:sched_wakeup(comm=trace pid=3323 prio=120 target_cpu=000)
         0.382 ( ): sched:sched_process_exec(filename=/usr/bin/sleep pid=3323 old_pid=3323)
         0.525 ( ): msr:write_msr(c0000100, value 7f5d508a0580)
         0.713 ( ): nanosleep(rqtp: 0x7fff487fb4a0) ...
         0.717 ( ): sched:sched_stat_runtime(comm=sleep pid=3323 runtime=617722 [ns] vruntime=78957731636 [ns])
         0.719 ( ): sched:sched_switch(prev_comm=sleep prev_pid=3323 prev_prio=120 prev_state=S ==> next_comm=swapper/0 next_pid=0 next_prio=120)
      1001.117 ( ): sched:sched_waking(comm=sleep pid=3323 prio=120 target_cpu=000)
      1001.157 ( ): sched:sched_wakeup(comm=sleep pid=3323 prio=120 target_cpu=000)
         0.713 (1000.522 ms): ... [continued]: nanosleep()) = 0
      1001.538 ( ): sched:sched_process_exit(comm=sleep pid=3323 prio=120)
      #

  - Make -v (verbose) mode be honoured for .perfconfig based
    trace.add_events, to help in diagnosing problems with building eBPF
    events (-e source.c).

  - When using eBPF syscall payload augmentation, do not show strace-like
    syscalls when all the user specified was some tracepoint event,
    bringing the behaviour in line with that of not using eBPF
    augmentation.

Intel PT:

  exported-sql-viewer GUI:

  Adrian Hunter:

  - Add LookupModel, HBoxLayout, VBoxLayout and global time range
    calculations so as to add a time chart by CPU.

perf script:

  Andi Kleen:

  - Allow --time (to specify a time span of interest) with --reltime
    (timestamps relative to start).

perf diff:

  Jin Yao:

  - Report noise for cycles diff, i.e. a histogram + stddev.

perf annotate:

  Arnaldo Carvalho de Melo:

  - Initialize env->cpuid when running in live mode (perf top), as it is
    used in some of the per-arch annotation init routines.

samples bpf:

  Björn Töpel:

  - Fixup fallout of using tools/perf/perf-sys.h from outside tools/perf.

Core:

  Ian Rogers:

  - Avoid 'sample_reg_masks' being const + weak, as this breaks with some
    compilers that constant-propagate from the weak symbol.

libperf:

  - First part of moving the perf_mmap class from tools/perf to libperf.

  - Propagate CFLAGS to libperf from the tools/perf Makefile.

Vendor events:

  John Garry:

  - Add an entry in MAINTAINERS with reviewers for the perf tool arm64
    pmu-events files.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 39b656ee9f
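To make the ->scnprintf()/->strtoul() pairing described above concrete, here is a minimal standalone sketch of the idea; the table contents, function names and signatures are simplified illustrations, not the actual tools/perf code:

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical name table, of the kind generated from msr-index.h */
    static const char *msr_names[] = {
    	[0x48]  = "IA32_SPEC_CTRL",
    	[0x6e0] = "IA32_TSC_DEADLINE",
    };

    /* ->scnprintf(): integer event payload -> human-readable string */
    static size_t msr__scnprintf(char *bf, size_t size, unsigned long msr)
    {
    	if (msr < sizeof(msr_names) / sizeof(msr_names[0]) && msr_names[msr])
    		return snprintf(bf, size, "%s", msr_names[msr]);
    	return snprintf(bf, size, "%#lx", msr); /* fall back to raw hex */
    }

    /* ->strtoul(): string from a --filter expression -> integer the
     * in-kernel tracepoint filter expects */
    static int msr__strtoul(const char *name, unsigned long *val)
    {
    	unsigned long i;

    	for (i = 0; i < sizeof(msr_names) / sizeof(msr_names[0]); i++) {
    		if (msr_names[i] && !strcmp(msr_names[i], name)) {
    			*val = i;
    			return 1;
    		}
    	}
    	return 0;
    }

    int main(void)
    {
    	char buf[32];
    	unsigned long val;

    	msr__scnprintf(buf, sizeof(buf), 0x48);
    	if (msr__strtoul("IA32_TSC_DEADLINE", &val))
    		printf("%s <-> %#lx\n", buf, val); /* IA32_SPEC_CTRL <-> 0x6e0 */
    	return 0;
    }

This is why a filter like --filter="msr==IA32_TSC_DEADLINE" can be typed with the symbolic name: the string is translated to 0x6e0 before the filter is handed to the kernel.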
MAINTAINERS
@@ -12769,6 +12769,13 @@ F:	arch/*/events/*
 F:	arch/*/events/*/*
 F:	tools/perf/
 
+PERFORMANCE EVENTS SUBSYSTEM ARM64 PMU EVENTS
+R:	John Garry <john.garry@huawei.com>
+R:	Will Deacon <will@kernel.org>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Supported
+F:	tools/perf/pmu-events/arch/arm64/
+
 PERSONALITY HANDLING
 M:	Christoph Hellwig <hch@infradead.org>
 L:	linux-abi-devel@lists.sourceforge.net
samples/bpf/Makefile
@@ -176,6 +176,7 @@ KBUILD_HOSTCFLAGS += -I$(srctree)/tools/lib/bpf/
 KBUILD_HOSTCFLAGS += -I$(srctree)/tools/testing/selftests/bpf/
 KBUILD_HOSTCFLAGS += -I$(srctree)/tools/lib/ -I$(srctree)/tools/include
 KBUILD_HOSTCFLAGS += -I$(srctree)/tools/perf
+KBUILD_HOSTCFLAGS += -DHAVE_ATTR_TEST=0
 
 HOSTCFLAGS_bpf_load.o += -I$(objtree)/usr/include -Wno-unused-variable
 
857	tools/arch/x86/include/asm/msr-index.h (new file)
@@ -0,0 +1,857 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MSR_INDEX_H
#define _ASM_X86_MSR_INDEX_H

#include <linux/bits.h>

/*
 * CPU model specific register (MSR) numbers.
 *
 * Do not add new entries to this file unless the definitions are shared
 * between multiple compilation units.
 */

/* x86-64 specific MSRs */
#define MSR_EFER		0xc0000080 /* extended feature register */
#define MSR_STAR		0xc0000081 /* legacy mode SYSCALL target */
#define MSR_LSTAR		0xc0000082 /* long mode SYSCALL target */
#define MSR_CSTAR		0xc0000083 /* compat mode SYSCALL target */
#define MSR_SYSCALL_MASK	0xc0000084 /* EFLAGS mask for syscall */
#define MSR_FS_BASE		0xc0000100 /* 64bit FS base */
#define MSR_GS_BASE		0xc0000101 /* 64bit GS base */
#define MSR_KERNEL_GS_BASE	0xc0000102 /* SwapGS GS shadow */
#define MSR_TSC_AUX		0xc0000103 /* Auxiliary TSC */

/* EFER bits: */
#define _EFER_SCE		0  /* SYSCALL/SYSRET */
#define _EFER_LME		8  /* Long mode enable */
#define _EFER_LMA		10 /* Long mode active (read-only) */
#define _EFER_NX		11 /* No execute enable */
#define _EFER_SVME		12 /* Enable virtualization */
#define _EFER_LMSLE		13 /* Long Mode Segment Limit Enable */
#define _EFER_FFXSR		14 /* Enable Fast FXSAVE/FXRSTOR */

#define EFER_SCE		(1<<_EFER_SCE)
#define EFER_LME		(1<<_EFER_LME)
#define EFER_LMA		(1<<_EFER_LMA)
#define EFER_NX			(1<<_EFER_NX)
#define EFER_SVME		(1<<_EFER_SVME)
#define EFER_LMSLE		(1<<_EFER_LMSLE)
#define EFER_FFXSR		(1<<_EFER_FFXSR)

/* Intel MSRs. Some also available on other CPUs */

#define MSR_IA32_SPEC_CTRL		0x00000048 /* Speculation Control */
#define SPEC_CTRL_IBRS			BIT(0)	   /* Indirect Branch Restricted Speculation */
#define SPEC_CTRL_STIBP_SHIFT		1	   /* Single Thread Indirect Branch Predictor (STIBP) bit */
#define SPEC_CTRL_STIBP			BIT(SPEC_CTRL_STIBP_SHIFT)	/* STIBP mask */
#define SPEC_CTRL_SSBD_SHIFT		2	   /* Speculative Store Bypass Disable bit */
#define SPEC_CTRL_SSBD			BIT(SPEC_CTRL_SSBD_SHIFT)	/* Speculative Store Bypass Disable */

#define MSR_IA32_PRED_CMD		0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB			BIT(0)	   /* Indirect Branch Prediction Barrier */

#define MSR_PPIN_CTL			0x0000004e
#define MSR_PPIN			0x0000004f

#define MSR_IA32_PERFCTR0		0x000000c1
#define MSR_IA32_PERFCTR1		0x000000c2
#define MSR_FSB_FREQ			0x000000cd
#define MSR_PLATFORM_INFO		0x000000ce
#define MSR_PLATFORM_INFO_CPUID_FAULT_BIT	31
#define MSR_PLATFORM_INFO_CPUID_FAULT		BIT_ULL(MSR_PLATFORM_INFO_CPUID_FAULT_BIT)

#define MSR_IA32_UMWAIT_CONTROL			0xe1
#define MSR_IA32_UMWAIT_CONTROL_C02_DISABLE	BIT(0)
#define MSR_IA32_UMWAIT_CONTROL_RESERVED	BIT(1)
/*
 * The time field is bit[31:2], but representing a 32bit value with
 * bit[1:0] zero.
 */
#define MSR_IA32_UMWAIT_CONTROL_TIME_MASK	(~0x03U)

#define MSR_PKG_CST_CONFIG_CONTROL	0x000000e2
#define NHM_C3_AUTO_DEMOTE		(1UL << 25)
#define NHM_C1_AUTO_DEMOTE		(1UL << 26)
#define ATM_LNC_C6_AUTO_DEMOTE		(1UL << 25)
#define SNB_C3_AUTO_UNDEMOTE		(1UL << 27)
#define SNB_C1_AUTO_UNDEMOTE		(1UL << 28)

#define MSR_MTRRcap			0x000000fe

#define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
#define ARCH_CAP_RDCL_NO		BIT(0)	/* Not susceptible to Meltdown */
#define ARCH_CAP_IBRS_ALL		BIT(1)	/* Enhanced IBRS support */
#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH	BIT(3)	/* Skip L1D flush on vmentry */
#define ARCH_CAP_SSB_NO			BIT(4)	/*
						 * Not susceptible to Speculative Store Bypass
						 * attack, so no Speculative Store Bypass
						 * control required.
						 */
#define ARCH_CAP_MDS_NO			BIT(5)	/*
						 * Not susceptible to
						 * Microarchitectural Data
						 * Sampling (MDS) vulnerabilities.
						 */

#define MSR_IA32_FLUSH_CMD		0x0000010b
#define L1D_FLUSH			BIT(0)	/*
						 * Writeback and invalidate the
						 * L1 data cache.
						 */

#define MSR_IA32_BBL_CR_CTL		0x00000119
#define MSR_IA32_BBL_CR_CTL3		0x0000011e

#define MSR_IA32_SYSENTER_CS		0x00000174
#define MSR_IA32_SYSENTER_ESP		0x00000175
#define MSR_IA32_SYSENTER_EIP		0x00000176

#define MSR_IA32_MCG_CAP		0x00000179
#define MSR_IA32_MCG_STATUS		0x0000017a
#define MSR_IA32_MCG_CTL		0x0000017b
#define MSR_IA32_MCG_EXT_CTL		0x000004d0

#define MSR_OFFCORE_RSP_0		0x000001a6
#define MSR_OFFCORE_RSP_1		0x000001a7
#define MSR_TURBO_RATIO_LIMIT		0x000001ad
#define MSR_TURBO_RATIO_LIMIT1		0x000001ae
#define MSR_TURBO_RATIO_LIMIT2		0x000001af

#define MSR_LBR_SELECT			0x000001c8
#define MSR_LBR_TOS			0x000001c9
#define MSR_LBR_NHM_FROM		0x00000680
#define MSR_LBR_NHM_TO			0x000006c0
#define MSR_LBR_CORE_FROM		0x00000040
#define MSR_LBR_CORE_TO			0x00000060

#define MSR_LBR_INFO_0			0x00000dc0 /* ... 0xddf for _31 */
#define LBR_INFO_MISPRED		BIT_ULL(63)
#define LBR_INFO_IN_TX			BIT_ULL(62)
#define LBR_INFO_ABORT			BIT_ULL(61)
#define LBR_INFO_CYCLES			0xffff

#define MSR_IA32_PEBS_ENABLE		0x000003f1
#define MSR_PEBS_DATA_CFG		0x000003f2
#define MSR_IA32_DS_AREA		0x00000600
#define MSR_IA32_PERF_CAPABILITIES	0x00000345
#define MSR_PEBS_LD_LAT_THRESHOLD	0x000003f6

#define MSR_IA32_RTIT_CTL		0x00000570
#define RTIT_CTL_TRACEEN		BIT(0)
#define RTIT_CTL_CYCLEACC		BIT(1)
#define RTIT_CTL_OS			BIT(2)
#define RTIT_CTL_USR			BIT(3)
#define RTIT_CTL_PWR_EVT_EN		BIT(4)
#define RTIT_CTL_FUP_ON_PTW		BIT(5)
#define RTIT_CTL_FABRIC_EN		BIT(6)
#define RTIT_CTL_CR3EN			BIT(7)
#define RTIT_CTL_TOPA			BIT(8)
#define RTIT_CTL_MTC_EN			BIT(9)
#define RTIT_CTL_TSC_EN			BIT(10)
#define RTIT_CTL_DISRETC		BIT(11)
#define RTIT_CTL_PTW_EN			BIT(12)
#define RTIT_CTL_BRANCH_EN		BIT(13)
#define RTIT_CTL_MTC_RANGE_OFFSET	14
#define RTIT_CTL_MTC_RANGE		(0x0full << RTIT_CTL_MTC_RANGE_OFFSET)
#define RTIT_CTL_CYC_THRESH_OFFSET	19
#define RTIT_CTL_CYC_THRESH		(0x0full << RTIT_CTL_CYC_THRESH_OFFSET)
#define RTIT_CTL_PSB_FREQ_OFFSET	24
#define RTIT_CTL_PSB_FREQ		(0x0full << RTIT_CTL_PSB_FREQ_OFFSET)
#define RTIT_CTL_ADDR0_OFFSET		32
#define RTIT_CTL_ADDR0			(0x0full << RTIT_CTL_ADDR0_OFFSET)
#define RTIT_CTL_ADDR1_OFFSET		36
#define RTIT_CTL_ADDR1			(0x0full << RTIT_CTL_ADDR1_OFFSET)
#define RTIT_CTL_ADDR2_OFFSET		40
#define RTIT_CTL_ADDR2			(0x0full << RTIT_CTL_ADDR2_OFFSET)
#define RTIT_CTL_ADDR3_OFFSET		44
#define RTIT_CTL_ADDR3			(0x0full << RTIT_CTL_ADDR3_OFFSET)
#define MSR_IA32_RTIT_STATUS		0x00000571
#define RTIT_STATUS_FILTEREN		BIT(0)
#define RTIT_STATUS_CONTEXTEN		BIT(1)
#define RTIT_STATUS_TRIGGEREN		BIT(2)
#define RTIT_STATUS_BUFFOVF		BIT(3)
#define RTIT_STATUS_ERROR		BIT(4)
#define RTIT_STATUS_STOPPED		BIT(5)
#define RTIT_STATUS_BYTECNT_OFFSET	32
#define RTIT_STATUS_BYTECNT		(0x1ffffull << RTIT_STATUS_BYTECNT_OFFSET)
#define MSR_IA32_RTIT_ADDR0_A		0x00000580
#define MSR_IA32_RTIT_ADDR0_B		0x00000581
#define MSR_IA32_RTIT_ADDR1_A		0x00000582
#define MSR_IA32_RTIT_ADDR1_B		0x00000583
#define MSR_IA32_RTIT_ADDR2_A		0x00000584
#define MSR_IA32_RTIT_ADDR2_B		0x00000585
#define MSR_IA32_RTIT_ADDR3_A		0x00000586
#define MSR_IA32_RTIT_ADDR3_B		0x00000587
#define MSR_IA32_RTIT_CR3_MATCH		0x00000572
#define MSR_IA32_RTIT_OUTPUT_BASE	0x00000560
#define MSR_IA32_RTIT_OUTPUT_MASK	0x00000561

#define MSR_MTRRfix64K_00000		0x00000250
#define MSR_MTRRfix16K_80000		0x00000258
#define MSR_MTRRfix16K_A0000		0x00000259
#define MSR_MTRRfix4K_C0000		0x00000268
#define MSR_MTRRfix4K_C8000		0x00000269
#define MSR_MTRRfix4K_D0000		0x0000026a
#define MSR_MTRRfix4K_D8000		0x0000026b
#define MSR_MTRRfix4K_E0000		0x0000026c
#define MSR_MTRRfix4K_E8000		0x0000026d
#define MSR_MTRRfix4K_F0000		0x0000026e
#define MSR_MTRRfix4K_F8000		0x0000026f
#define MSR_MTRRdefType			0x000002ff

#define MSR_IA32_CR_PAT			0x00000277

#define MSR_IA32_DEBUGCTLMSR		0x000001d9
#define MSR_IA32_LASTBRANCHFROMIP	0x000001db
#define MSR_IA32_LASTBRANCHTOIP		0x000001dc
#define MSR_IA32_LASTINTFROMIP		0x000001dd
#define MSR_IA32_LASTINTTOIP		0x000001de

/* DEBUGCTLMSR bits (others vary by model): */
#define DEBUGCTLMSR_LBR			(1UL <<  0) /* last branch recording */
#define DEBUGCTLMSR_BTF_SHIFT		1
#define DEBUGCTLMSR_BTF			(1UL <<  1) /* single-step on branches */
#define DEBUGCTLMSR_TR			(1UL <<  6)
#define DEBUGCTLMSR_BTS			(1UL <<  7)
#define DEBUGCTLMSR_BTINT		(1UL <<  8)
#define DEBUGCTLMSR_BTS_OFF_OS		(1UL <<  9)
#define DEBUGCTLMSR_BTS_OFF_USR		(1UL << 10)
#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI	(1UL << 11)
#define DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI	(1UL << 12)
#define DEBUGCTLMSR_FREEZE_IN_SMM_BIT	14
#define DEBUGCTLMSR_FREEZE_IN_SMM	(1UL << DEBUGCTLMSR_FREEZE_IN_SMM_BIT)

#define MSR_PEBS_FRONTEND		0x000003f7

#define MSR_IA32_POWER_CTL		0x000001fc

#define MSR_IA32_MC0_CTL		0x00000400
#define MSR_IA32_MC0_STATUS		0x00000401
#define MSR_IA32_MC0_ADDR		0x00000402
#define MSR_IA32_MC0_MISC		0x00000403

/* C-state Residency Counters */
#define MSR_PKG_C3_RESIDENCY		0x000003f8
#define MSR_PKG_C6_RESIDENCY		0x000003f9
#define MSR_ATOM_PKG_C6_RESIDENCY	0x000003fa
#define MSR_PKG_C7_RESIDENCY		0x000003fa
#define MSR_CORE_C3_RESIDENCY		0x000003fc
#define MSR_CORE_C6_RESIDENCY		0x000003fd
#define MSR_CORE_C7_RESIDENCY		0x000003fe
#define MSR_KNL_CORE_C6_RESIDENCY	0x000003ff
#define MSR_PKG_C2_RESIDENCY		0x0000060d
#define MSR_PKG_C8_RESIDENCY		0x00000630
#define MSR_PKG_C9_RESIDENCY		0x00000631
#define MSR_PKG_C10_RESIDENCY		0x00000632

/* Interrupt Response Limit */
#define MSR_PKGC3_IRTL			0x0000060a
#define MSR_PKGC6_IRTL			0x0000060b
#define MSR_PKGC7_IRTL			0x0000060c
#define MSR_PKGC8_IRTL			0x00000633
#define MSR_PKGC9_IRTL			0x00000634
#define MSR_PKGC10_IRTL			0x00000635

/* Run Time Average Power Limiting (RAPL) Interface */

#define MSR_RAPL_POWER_UNIT		0x00000606

#define MSR_PKG_POWER_LIMIT		0x00000610
#define MSR_PKG_ENERGY_STATUS		0x00000611
#define MSR_PKG_PERF_STATUS		0x00000613
#define MSR_PKG_POWER_INFO		0x00000614

#define MSR_DRAM_POWER_LIMIT		0x00000618
#define MSR_DRAM_ENERGY_STATUS		0x00000619
#define MSR_DRAM_PERF_STATUS		0x0000061b
#define MSR_DRAM_POWER_INFO		0x0000061c

#define MSR_PP0_POWER_LIMIT		0x00000638
#define MSR_PP0_ENERGY_STATUS		0x00000639
#define MSR_PP0_POLICY			0x0000063a
#define MSR_PP0_PERF_STATUS		0x0000063b

#define MSR_PP1_POWER_LIMIT		0x00000640
#define MSR_PP1_ENERGY_STATUS		0x00000641
#define MSR_PP1_POLICY			0x00000642

/* Config TDP MSRs */
#define MSR_CONFIG_TDP_NOMINAL		0x00000648
#define MSR_CONFIG_TDP_LEVEL_1		0x00000649
#define MSR_CONFIG_TDP_LEVEL_2		0x0000064A
#define MSR_CONFIG_TDP_CONTROL		0x0000064B
#define MSR_TURBO_ACTIVATION_RATIO	0x0000064C

#define MSR_PLATFORM_ENERGY_STATUS	0x0000064D

#define MSR_PKG_WEIGHTED_CORE_C0_RES	0x00000658
#define MSR_PKG_ANY_CORE_C0_RES		0x00000659
#define MSR_PKG_ANY_GFXE_C0_RES		0x0000065A
#define MSR_PKG_BOTH_CORE_GFXE_C0_RES	0x0000065B

#define MSR_CORE_C1_RES			0x00000660
#define MSR_MODULE_C6_RES_MS		0x00000664

#define MSR_CC6_DEMOTION_POLICY_CONFIG	0x00000668
#define MSR_MC6_DEMOTION_POLICY_CONFIG	0x00000669

#define MSR_ATOM_CORE_RATIOS		0x0000066a
#define MSR_ATOM_CORE_VIDS		0x0000066b
#define MSR_ATOM_CORE_TURBO_RATIOS	0x0000066c
#define MSR_ATOM_CORE_TURBO_VIDS	0x0000066d


#define MSR_CORE_PERF_LIMIT_REASONS	0x00000690
#define MSR_GFX_PERF_LIMIT_REASONS	0x000006B0
#define MSR_RING_PERF_LIMIT_REASONS	0x000006B1

/* Hardware P state interface */
#define MSR_PPERF			0x0000064e
#define MSR_PERF_LIMIT_REASONS		0x0000064f
#define MSR_PM_ENABLE			0x00000770
#define MSR_HWP_CAPABILITIES		0x00000771
#define MSR_HWP_REQUEST_PKG		0x00000772
#define MSR_HWP_INTERRUPT		0x00000773
#define MSR_HWP_REQUEST			0x00000774
#define MSR_HWP_STATUS			0x00000777

/* CPUID.6.EAX */
#define HWP_BASE_BIT			(1<<7)
#define HWP_NOTIFICATIONS_BIT		(1<<8)
#define HWP_ACTIVITY_WINDOW_BIT		(1<<9)
#define HWP_ENERGY_PERF_PREFERENCE_BIT	(1<<10)
#define HWP_PACKAGE_LEVEL_REQUEST_BIT	(1<<11)

/* IA32_HWP_CAPABILITIES */
#define HWP_HIGHEST_PERF(x)		(((x) >> 0) & 0xff)
#define HWP_GUARANTEED_PERF(x)		(((x) >> 8) & 0xff)
#define HWP_MOSTEFFICIENT_PERF(x)	(((x) >> 16) & 0xff)
#define HWP_LOWEST_PERF(x)		(((x) >> 24) & 0xff)

/* IA32_HWP_REQUEST */
#define HWP_MIN_PERF(x)			(x & 0xff)
#define HWP_MAX_PERF(x)			((x & 0xff) << 8)
#define HWP_DESIRED_PERF(x)		((x & 0xff) << 16)
#define HWP_ENERGY_PERF_PREFERENCE(x)	(((unsigned long long) x & 0xff) << 24)
#define HWP_EPP_PERFORMANCE		0x00
#define HWP_EPP_BALANCE_PERFORMANCE	0x80
#define HWP_EPP_BALANCE_POWERSAVE	0xC0
#define HWP_EPP_POWERSAVE		0xFF
#define HWP_ACTIVITY_WINDOW(x)		((unsigned long long)(x & 0xff3) << 32)
#define HWP_PACKAGE_CONTROL(x)		((unsigned long long)(x & 0x1) << 42)

/* IA32_HWP_STATUS */
#define HWP_GUARANTEED_CHANGE(x)	(x & 0x1)
#define HWP_EXCURSION_TO_MINIMUM(x)	(x & 0x4)

/* IA32_HWP_INTERRUPT */
#define HWP_CHANGE_TO_GUARANTEED_INT(x)	(x & 0x1)
#define HWP_EXCURSION_TO_MINIMUM_INT(x)	(x & 0x2)

#define MSR_AMD64_MC0_MASK		0xc0010044

#define MSR_IA32_MCx_CTL(x)		(MSR_IA32_MC0_CTL + 4*(x))
#define MSR_IA32_MCx_STATUS(x)		(MSR_IA32_MC0_STATUS + 4*(x))
#define MSR_IA32_MCx_ADDR(x)		(MSR_IA32_MC0_ADDR + 4*(x))
#define MSR_IA32_MCx_MISC(x)		(MSR_IA32_MC0_MISC + 4*(x))

#define MSR_AMD64_MCx_MASK(x)		(MSR_AMD64_MC0_MASK + (x))

/* These are consecutive and not in the normal 4er MCE bank block */
#define MSR_IA32_MC0_CTL2		0x00000280
#define MSR_IA32_MCx_CTL2(x)		(MSR_IA32_MC0_CTL2 + (x))

#define MSR_P6_PERFCTR0			0x000000c1
#define MSR_P6_PERFCTR1			0x000000c2
#define MSR_P6_EVNTSEL0			0x00000186
#define MSR_P6_EVNTSEL1			0x00000187

#define MSR_KNC_PERFCTR0		0x00000020
#define MSR_KNC_PERFCTR1		0x00000021
#define MSR_KNC_EVNTSEL0		0x00000028
#define MSR_KNC_EVNTSEL1		0x00000029

/* Alternative perfctr range with full access. */
#define MSR_IA32_PMC0			0x000004c1

/* Auto-reload via MSR instead of DS area */
#define MSR_RELOAD_PMC0			0x000014c1
#define MSR_RELOAD_FIXED_CTR0		0x00001309

/*
 * AMD64 MSRs. Not complete. See the architecture manual for a more
 * complete list.
 */
#define MSR_AMD64_PATCH_LEVEL		0x0000008b
#define MSR_AMD64_TSC_RATIO		0xc0000104
#define MSR_AMD64_NB_CFG		0xc001001f
#define MSR_AMD64_CPUID_FN_1		0xc0011004
#define MSR_AMD64_PATCH_LOADER		0xc0010020
#define MSR_AMD_PERF_CTL		0xc0010062
#define MSR_AMD_PERF_STATUS		0xc0010063
#define MSR_AMD_PSTATE_DEF_BASE		0xc0010064
#define MSR_AMD64_OSVW_ID_LENGTH	0xc0010140
#define MSR_AMD64_OSVW_STATUS		0xc0010141
#define MSR_AMD64_LS_CFG		0xc0011020
#define MSR_AMD64_DC_CFG		0xc0011022
#define MSR_AMD64_BU_CFG2		0xc001102a
#define MSR_AMD64_IBSFETCHCTL		0xc0011030
#define MSR_AMD64_IBSFETCHLINAD		0xc0011031
#define MSR_AMD64_IBSFETCHPHYSAD	0xc0011032
#define MSR_AMD64_IBSFETCH_REG_COUNT	3
#define MSR_AMD64_IBSFETCH_REG_MASK	((1UL<<MSR_AMD64_IBSFETCH_REG_COUNT)-1)
#define MSR_AMD64_IBSOPCTL		0xc0011033
#define MSR_AMD64_IBSOPRIP		0xc0011034
#define MSR_AMD64_IBSOPDATA		0xc0011035
#define MSR_AMD64_IBSOPDATA2		0xc0011036
#define MSR_AMD64_IBSOPDATA3		0xc0011037
#define MSR_AMD64_IBSDCLINAD		0xc0011038
#define MSR_AMD64_IBSDCPHYSAD		0xc0011039
#define MSR_AMD64_IBSOP_REG_COUNT	7
#define MSR_AMD64_IBSOP_REG_MASK	((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1)
#define MSR_AMD64_IBSCTL		0xc001103a
#define MSR_AMD64_IBSBRTARGET		0xc001103b
#define MSR_AMD64_IBSOPDATA4		0xc001103d
#define MSR_AMD64_IBS_REG_COUNT_MAX	8 /* includes MSR_AMD64_IBSBRTARGET */
#define MSR_AMD64_SEV			0xc0010131
#define MSR_AMD64_SEV_ENABLED_BIT	0
#define MSR_AMD64_SEV_ENABLED		BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT)

#define MSR_AMD64_VIRT_SPEC_CTRL	0xc001011f

/* Fam 17h MSRs */
#define MSR_F17H_IRPERF			0xc00000e9

/* Fam 16h MSRs */
#define MSR_F16H_L2I_PERF_CTL		0xc0010230
#define MSR_F16H_L2I_PERF_CTR		0xc0010231
#define MSR_F16H_DR1_ADDR_MASK		0xc0011019
#define MSR_F16H_DR2_ADDR_MASK		0xc001101a
#define MSR_F16H_DR3_ADDR_MASK		0xc001101b
#define MSR_F16H_DR0_ADDR_MASK		0xc0011027

/* Fam 15h MSRs */
#define MSR_F15H_PERF_CTL		0xc0010200
#define MSR_F15H_PERF_CTL0		MSR_F15H_PERF_CTL
#define MSR_F15H_PERF_CTL1		(MSR_F15H_PERF_CTL + 2)
#define MSR_F15H_PERF_CTL2		(MSR_F15H_PERF_CTL + 4)
#define MSR_F15H_PERF_CTL3		(MSR_F15H_PERF_CTL + 6)
#define MSR_F15H_PERF_CTL4		(MSR_F15H_PERF_CTL + 8)
#define MSR_F15H_PERF_CTL5		(MSR_F15H_PERF_CTL + 10)

#define MSR_F15H_PERF_CTR		0xc0010201
#define MSR_F15H_PERF_CTR0		MSR_F15H_PERF_CTR
#define MSR_F15H_PERF_CTR1		(MSR_F15H_PERF_CTR + 2)
#define MSR_F15H_PERF_CTR2		(MSR_F15H_PERF_CTR + 4)
#define MSR_F15H_PERF_CTR3		(MSR_F15H_PERF_CTR + 6)
#define MSR_F15H_PERF_CTR4		(MSR_F15H_PERF_CTR + 8)
#define MSR_F15H_PERF_CTR5		(MSR_F15H_PERF_CTR + 10)

#define MSR_F15H_NB_PERF_CTL		0xc0010240
#define MSR_F15H_NB_PERF_CTR		0xc0010241
#define MSR_F15H_PTSC			0xc0010280
#define MSR_F15H_IC_CFG			0xc0011021
#define MSR_F15H_EX_CFG			0xc001102c

/* Fam 10h MSRs */
#define MSR_FAM10H_MMIO_CONF_BASE	0xc0010058
#define FAM10H_MMIO_CONF_ENABLE		(1<<0)
#define FAM10H_MMIO_CONF_BUSRANGE_MASK	0xf
#define FAM10H_MMIO_CONF_BUSRANGE_SHIFT	2
#define FAM10H_MMIO_CONF_BASE_MASK	0xfffffffULL
#define FAM10H_MMIO_CONF_BASE_SHIFT	20
#define MSR_FAM10H_NODE_ID		0xc001100c
#define MSR_F10H_DECFG			0xc0011029
#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT	1
#define MSR_F10H_DECFG_LFENCE_SERIALIZE		BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)

/* K8 MSRs */
#define MSR_K8_TOP_MEM1			0xc001001a
#define MSR_K8_TOP_MEM2			0xc001001d
#define MSR_K8_SYSCFG			0xc0010010
#define MSR_K8_SYSCFG_MEM_ENCRYPT_BIT	23
#define MSR_K8_SYSCFG_MEM_ENCRYPT	BIT_ULL(MSR_K8_SYSCFG_MEM_ENCRYPT_BIT)
#define MSR_K8_INT_PENDING_MSG		0xc0010055
/* C1E active bits in int pending message */
#define K8_INTP_C1E_ACTIVE_MASK		0x18000000
#define MSR_K8_TSEG_ADDR		0xc0010112
#define MSR_K8_TSEG_MASK		0xc0010113
#define K8_MTRRFIXRANGE_DRAM_ENABLE	0x00040000 /* MtrrFixDramEn bit */
#define K8_MTRRFIXRANGE_DRAM_MODIFY	0x00080000 /* MtrrFixDramModEn bit */
#define K8_MTRR_RDMEM_WRMEM_MASK	0x18181818 /* Mask: RdMem|WrMem */

/* K7 MSRs */
#define MSR_K7_EVNTSEL0			0xc0010000
#define MSR_K7_PERFCTR0			0xc0010004
#define MSR_K7_EVNTSEL1			0xc0010001
#define MSR_K7_PERFCTR1			0xc0010005
#define MSR_K7_EVNTSEL2			0xc0010002
#define MSR_K7_PERFCTR2			0xc0010006
#define MSR_K7_EVNTSEL3			0xc0010003
#define MSR_K7_PERFCTR3			0xc0010007
#define MSR_K7_CLK_CTL			0xc001001b
#define MSR_K7_HWCR			0xc0010015
#define MSR_K7_HWCR_SMMLOCK_BIT		0
#define MSR_K7_HWCR_SMMLOCK		BIT_ULL(MSR_K7_HWCR_SMMLOCK_BIT)
#define MSR_K7_FID_VID_CTL		0xc0010041
#define MSR_K7_FID_VID_STATUS		0xc0010042

/* K6 MSRs */
#define MSR_K6_WHCR			0xc0000082
#define MSR_K6_UWCCR			0xc0000085
#define MSR_K6_EPMR			0xc0000086
#define MSR_K6_PSOR			0xc0000087
#define MSR_K6_PFIR			0xc0000088

/* Centaur-Hauls/IDT defined MSRs. */
#define MSR_IDT_FCR1			0x00000107
#define MSR_IDT_FCR2			0x00000108
#define MSR_IDT_FCR3			0x00000109
#define MSR_IDT_FCR4			0x0000010a

#define MSR_IDT_MCR0			0x00000110
#define MSR_IDT_MCR1			0x00000111
#define MSR_IDT_MCR2			0x00000112
#define MSR_IDT_MCR3			0x00000113
#define MSR_IDT_MCR4			0x00000114
#define MSR_IDT_MCR5			0x00000115
#define MSR_IDT_MCR6			0x00000116
#define MSR_IDT_MCR7			0x00000117
#define MSR_IDT_MCR_CTRL		0x00000120

/* VIA Cyrix defined MSRs*/
#define MSR_VIA_FCR			0x00001107
#define MSR_VIA_LONGHAUL		0x0000110a
#define MSR_VIA_RNG			0x0000110b
#define MSR_VIA_BCR2			0x00001147

/* Transmeta defined MSRs */
#define MSR_TMTA_LONGRUN_CTRL		0x80868010
#define MSR_TMTA_LONGRUN_FLAGS		0x80868011
#define MSR_TMTA_LRTI_READOUT		0x80868018
#define MSR_TMTA_LRTI_VOLT_MHZ		0x8086801a

/* Intel defined MSRs. */
#define MSR_IA32_P5_MC_ADDR		0x00000000
#define MSR_IA32_P5_MC_TYPE		0x00000001
#define MSR_IA32_TSC			0x00000010
#define MSR_IA32_PLATFORM_ID		0x00000017
#define MSR_IA32_EBL_CR_POWERON		0x0000002a
#define MSR_EBC_FREQUENCY_ID		0x0000002c
#define MSR_SMI_COUNT			0x00000034
#define MSR_IA32_FEATURE_CONTROL	0x0000003a
#define MSR_IA32_TSC_ADJUST		0x0000003b
#define MSR_IA32_BNDCFGS		0x00000d90

#define MSR_IA32_BNDCFGS_RSVD		0x00000ffc

#define MSR_IA32_XSS			0x00000da0

#define FEATURE_CONTROL_LOCKED				(1<<0)
#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX	(1<<1)
#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX	(1<<2)
#define FEATURE_CONTROL_LMCE				(1<<20)

#define MSR_IA32_APICBASE		0x0000001b
#define MSR_IA32_APICBASE_BSP		(1<<8)
#define MSR_IA32_APICBASE_ENABLE	(1<<11)
#define MSR_IA32_APICBASE_BASE		(0xfffff<<12)

#define MSR_IA32_TSCDEADLINE		0x000006e0

#define MSR_IA32_UCODE_WRITE		0x00000079
#define MSR_IA32_UCODE_REV		0x0000008b

#define MSR_IA32_SMM_MONITOR_CTL	0x0000009b
#define MSR_IA32_SMBASE			0x0000009e

#define MSR_IA32_PERF_STATUS		0x00000198
#define MSR_IA32_PERF_CTL		0x00000199
#define INTEL_PERF_CTL_MASK		0xffff

#define MSR_IA32_MPERF			0x000000e7
#define MSR_IA32_APERF			0x000000e8

#define MSR_IA32_THERM_CONTROL		0x0000019a
#define MSR_IA32_THERM_INTERRUPT	0x0000019b

#define THERM_INT_HIGH_ENABLE		(1 << 0)
#define THERM_INT_LOW_ENABLE		(1 << 1)
#define THERM_INT_PLN_ENABLE		(1 << 24)

#define MSR_IA32_THERM_STATUS		0x0000019c

#define THERM_STATUS_PROCHOT		(1 << 0)
#define THERM_STATUS_POWER_LIMIT	(1 << 10)

#define MSR_THERM2_CTL			0x0000019d

#define MSR_THERM2_CTL_TM_SELECT	(1ULL << 16)

#define MSR_IA32_MISC_ENABLE		0x000001a0

#define MSR_IA32_TEMPERATURE_TARGET	0x000001a2

#define MSR_MISC_FEATURE_CONTROL	0x000001a4
#define MSR_MISC_PWR_MGMT		0x000001aa

#define MSR_IA32_ENERGY_PERF_BIAS	0x000001b0
#define ENERGY_PERF_BIAS_PERFORMANCE		0
#define ENERGY_PERF_BIAS_BALANCE_PERFORMANCE	4
#define ENERGY_PERF_BIAS_NORMAL			6
#define ENERGY_PERF_BIAS_BALANCE_POWERSAVE	8
#define ENERGY_PERF_BIAS_POWERSAVE		15

#define MSR_IA32_PACKAGE_THERM_STATUS		0x000001b1

#define PACKAGE_THERM_STATUS_PROCHOT		(1 << 0)
#define PACKAGE_THERM_STATUS_POWER_LIMIT	(1 << 10)

#define MSR_IA32_PACKAGE_THERM_INTERRUPT	0x000001b2

#define PACKAGE_THERM_INT_HIGH_ENABLE		(1 << 0)
#define PACKAGE_THERM_INT_LOW_ENABLE		(1 << 1)
#define PACKAGE_THERM_INT_PLN_ENABLE		(1 << 24)

/* Thermal Thresholds Support */
#define THERM_INT_THRESHOLD0_ENABLE	(1 << 15)
#define THERM_SHIFT_THRESHOLD0		8
#define THERM_MASK_THRESHOLD0		(0x7f << THERM_SHIFT_THRESHOLD0)
#define THERM_INT_THRESHOLD1_ENABLE	(1 << 23)
#define THERM_SHIFT_THRESHOLD1		16
#define THERM_MASK_THRESHOLD1		(0x7f << THERM_SHIFT_THRESHOLD1)
#define THERM_STATUS_THRESHOLD0		(1 << 6)
#define THERM_LOG_THRESHOLD0		(1 << 7)
#define THERM_STATUS_THRESHOLD1		(1 << 8)
#define THERM_LOG_THRESHOLD1		(1 << 9)

/* MISC_ENABLE bits: architectural */
#define MSR_IA32_MISC_ENABLE_FAST_STRING_BIT		0
#define MSR_IA32_MISC_ENABLE_FAST_STRING		(1ULL << MSR_IA32_MISC_ENABLE_FAST_STRING_BIT)
#define MSR_IA32_MISC_ENABLE_TCC_BIT			1
#define MSR_IA32_MISC_ENABLE_TCC			(1ULL << MSR_IA32_MISC_ENABLE_TCC_BIT)
#define MSR_IA32_MISC_ENABLE_EMON_BIT			7
#define MSR_IA32_MISC_ENABLE_EMON			(1ULL << MSR_IA32_MISC_ENABLE_EMON_BIT)
#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT		11
#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL		(1ULL << MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT)
#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT		12
#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL		(1ULL << MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT)
#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT	16
#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP		(1ULL << MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT)
#define MSR_IA32_MISC_ENABLE_MWAIT_BIT			18
#define MSR_IA32_MISC_ENABLE_MWAIT			(1ULL << MSR_IA32_MISC_ENABLE_MWAIT_BIT)
#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT		22
#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID		(1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT)
#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT		23
#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE		(1ULL << MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT)
#define MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT		34
#define MSR_IA32_MISC_ENABLE_XD_DISABLE			(1ULL << MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT)

/* MISC_ENABLE bits: model-specific, meaning may vary from core to core */
#define MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT		2
#define MSR_IA32_MISC_ENABLE_X87_COMPAT			(1ULL << MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT)
#define MSR_IA32_MISC_ENABLE_TM1_BIT			3
#define MSR_IA32_MISC_ENABLE_TM1			(1ULL << MSR_IA32_MISC_ENABLE_TM1_BIT)
#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT	4
#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE		(1ULL << MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT)
#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT	6
#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE		(1ULL << MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT)
#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT		8
#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK		(1ULL << MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT)
#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT	9
#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE		(1ULL << MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT)
#define MSR_IA32_MISC_ENABLE_FERR_BIT			10
#define MSR_IA32_MISC_ENABLE_FERR			(1ULL << MSR_IA32_MISC_ENABLE_FERR_BIT)
#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT		10
#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX		(1ULL << MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT)
#define MSR_IA32_MISC_ENABLE_TM2_BIT			13
#define MSR_IA32_MISC_ENABLE_TM2			(1ULL << MSR_IA32_MISC_ENABLE_TM2_BIT)
#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT	19
#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE		(1ULL << MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT)
#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT		20
#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK		(1ULL << MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT)
#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT		24
#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT		(1ULL << MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT)
#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT	37
#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE		(1ULL << MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT)
#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT		38
#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE		(1ULL << MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT)
#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT	39
#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE		(1ULL << MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT)

/* MISC_FEATURES_ENABLES non-architectural features */
#define MSR_MISC_FEATURES_ENABLES	0x00000140

#define MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT	0
#define MSR_MISC_FEATURES_ENABLES_CPUID_FAULT		BIT_ULL(MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT)
#define MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT	1

#define MSR_IA32_TSC_DEADLINE		0x000006E0


#define MSR_TSX_FORCE_ABORT		0x0000010F

#define MSR_TFA_RTM_FORCE_ABORT_BIT	0
#define MSR_TFA_RTM_FORCE_ABORT		BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT)

/* P4/Xeon+ specific */
#define MSR_IA32_MCG_EAX		0x00000180
#define MSR_IA32_MCG_EBX		0x00000181
#define MSR_IA32_MCG_ECX		0x00000182
#define MSR_IA32_MCG_EDX		0x00000183
#define MSR_IA32_MCG_ESI		0x00000184
#define MSR_IA32_MCG_EDI		0x00000185
#define MSR_IA32_MCG_EBP		0x00000186
#define MSR_IA32_MCG_ESP		0x00000187
#define MSR_IA32_MCG_EFLAGS		0x00000188
#define MSR_IA32_MCG_EIP		0x00000189
#define MSR_IA32_MCG_RESERVED		0x0000018a

/* Pentium IV performance counter MSRs */
#define MSR_P4_BPU_PERFCTR0		0x00000300
#define MSR_P4_BPU_PERFCTR1		0x00000301
#define MSR_P4_BPU_PERFCTR2		0x00000302
#define MSR_P4_BPU_PERFCTR3		0x00000303
#define MSR_P4_MS_PERFCTR0		0x00000304
#define MSR_P4_MS_PERFCTR1		0x00000305
#define MSR_P4_MS_PERFCTR2		0x00000306
#define MSR_P4_MS_PERFCTR3		0x00000307
#define MSR_P4_FLAME_PERFCTR0		0x00000308
#define MSR_P4_FLAME_PERFCTR1		0x00000309
#define MSR_P4_FLAME_PERFCTR2		0x0000030a
#define MSR_P4_FLAME_PERFCTR3		0x0000030b
#define MSR_P4_IQ_PERFCTR0		0x0000030c
#define MSR_P4_IQ_PERFCTR1		0x0000030d
#define MSR_P4_IQ_PERFCTR2		0x0000030e
#define MSR_P4_IQ_PERFCTR3		0x0000030f
#define MSR_P4_IQ_PERFCTR4		0x00000310
#define MSR_P4_IQ_PERFCTR5		0x00000311
#define MSR_P4_BPU_CCCR0		0x00000360
#define MSR_P4_BPU_CCCR1		0x00000361
#define MSR_P4_BPU_CCCR2		0x00000362
#define MSR_P4_BPU_CCCR3		0x00000363
#define MSR_P4_MS_CCCR0			0x00000364
#define MSR_P4_MS_CCCR1			0x00000365
#define MSR_P4_MS_CCCR2			0x00000366
#define MSR_P4_MS_CCCR3			0x00000367
#define MSR_P4_FLAME_CCCR0		0x00000368
#define MSR_P4_FLAME_CCCR1		0x00000369
#define MSR_P4_FLAME_CCCR2		0x0000036a
#define MSR_P4_FLAME_CCCR3		0x0000036b
#define MSR_P4_IQ_CCCR0			0x0000036c
#define MSR_P4_IQ_CCCR1			0x0000036d
#define MSR_P4_IQ_CCCR2			0x0000036e
#define MSR_P4_IQ_CCCR3			0x0000036f
#define MSR_P4_IQ_CCCR4			0x00000370
#define MSR_P4_IQ_CCCR5			0x00000371
#define MSR_P4_ALF_ESCR0		0x000003ca
#define MSR_P4_ALF_ESCR1		0x000003cb
#define MSR_P4_BPU_ESCR0		0x000003b2
#define MSR_P4_BPU_ESCR1		0x000003b3
#define MSR_P4_BSU_ESCR0		0x000003a0
#define MSR_P4_BSU_ESCR1		0x000003a1
#define MSR_P4_CRU_ESCR0		0x000003b8
#define MSR_P4_CRU_ESCR1		0x000003b9
#define MSR_P4_CRU_ESCR2		0x000003cc
#define MSR_P4_CRU_ESCR3		0x000003cd
#define MSR_P4_CRU_ESCR4		0x000003e0
#define MSR_P4_CRU_ESCR5		0x000003e1
#define MSR_P4_DAC_ESCR0		0x000003a8
#define MSR_P4_DAC_ESCR1		0x000003a9
#define MSR_P4_FIRM_ESCR0		0x000003a4
#define MSR_P4_FIRM_ESCR1		0x000003a5
#define MSR_P4_FLAME_ESCR0		0x000003a6
#define MSR_P4_FLAME_ESCR1		0x000003a7
#define MSR_P4_FSB_ESCR0		0x000003a2
#define MSR_P4_FSB_ESCR1		0x000003a3
#define MSR_P4_IQ_ESCR0			0x000003ba
#define MSR_P4_IQ_ESCR1			0x000003bb
#define MSR_P4_IS_ESCR0			0x000003b4
#define MSR_P4_IS_ESCR1			0x000003b5
#define MSR_P4_ITLB_ESCR0		0x000003b6
#define MSR_P4_ITLB_ESCR1		0x000003b7
#define MSR_P4_IX_ESCR0			0x000003c8
#define MSR_P4_IX_ESCR1			0x000003c9
#define MSR_P4_MOB_ESCR0		0x000003aa
#define MSR_P4_MOB_ESCR1		0x000003ab
#define MSR_P4_MS_ESCR0			0x000003c0
#define MSR_P4_MS_ESCR1			0x000003c1
#define MSR_P4_PMH_ESCR0		0x000003ac
#define MSR_P4_PMH_ESCR1		0x000003ad
#define MSR_P4_RAT_ESCR0		0x000003bc
#define MSR_P4_RAT_ESCR1		0x000003bd
#define MSR_P4_SAAT_ESCR0		0x000003ae
#define MSR_P4_SAAT_ESCR1		0x000003af
#define MSR_P4_SSU_ESCR0		0x000003be
#define MSR_P4_SSU_ESCR1		0x000003bf /* guess: not in manual */

#define MSR_P4_TBPU_ESCR0		0x000003c2
#define MSR_P4_TBPU_ESCR1		0x000003c3
#define MSR_P4_TC_ESCR0			0x000003c4
#define MSR_P4_TC_ESCR1			0x000003c5
#define MSR_P4_U2L_ESCR0		0x000003b0
#define MSR_P4_U2L_ESCR1		0x000003b1

#define MSR_P4_PEBS_MATRIX_VERT		0x000003f2

/* Intel Core-based CPU performance counters */
#define MSR_CORE_PERF_FIXED_CTR0	0x00000309
#define MSR_CORE_PERF_FIXED_CTR1	0x0000030a
#define MSR_CORE_PERF_FIXED_CTR2	0x0000030b
#define MSR_CORE_PERF_FIXED_CTR_CTRL	0x0000038d
#define MSR_CORE_PERF_GLOBAL_STATUS	0x0000038e
#define MSR_CORE_PERF_GLOBAL_CTRL	0x0000038f
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL	0x00000390

/* PERF_GLOBAL_OVF_CTL bits */
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT	55
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI		(1ULL << MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT)
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF_BIT		62
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF			(1ULL << MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF_BIT)
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD_BIT		63
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD			(1ULL << MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD_BIT)

/* Geode defined MSRs */
#define MSR_GEODE_BUSCONT_CONF0		0x00001900

/* Intel VT MSRs */
#define MSR_IA32_VMX_BASIC		0x00000480
#define MSR_IA32_VMX_PINBASED_CTLS	0x00000481
#define MSR_IA32_VMX_PROCBASED_CTLS	0x00000482
#define MSR_IA32_VMX_EXIT_CTLS		0x00000483
#define MSR_IA32_VMX_ENTRY_CTLS		0x00000484
#define MSR_IA32_VMX_MISC		0x00000485
#define MSR_IA32_VMX_CR0_FIXED0		0x00000486
#define MSR_IA32_VMX_CR0_FIXED1		0x00000487
#define MSR_IA32_VMX_CR4_FIXED0		0x00000488
#define MSR_IA32_VMX_CR4_FIXED1		0x00000489
#define MSR_IA32_VMX_VMCS_ENUM		0x0000048a
#define MSR_IA32_VMX_PROCBASED_CTLS2	0x0000048b
#define MSR_IA32_VMX_EPT_VPID_CAP	0x0000048c
#define MSR_IA32_VMX_TRUE_PINBASED_CTLS	0x0000048d
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS	0x0000048e
#define MSR_IA32_VMX_TRUE_EXIT_CTLS	0x0000048f
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS	0x00000490
#define MSR_IA32_VMX_VMFUNC		0x00000491

/* VMX_BASIC bits and bitmasks */
#define VMX_BASIC_VMCS_SIZE_SHIFT	32
#define VMX_BASIC_TRUE_CTLS		(1ULL << 55)
#define VMX_BASIC_64			0x0001000000000000LLU
#define VMX_BASIC_MEM_TYPE_SHIFT	50
#define VMX_BASIC_MEM_TYPE_MASK		0x003c000000000000LLU
#define VMX_BASIC_MEM_TYPE_WB		6LLU
#define VMX_BASIC_INOUT			0x0040000000000000LLU

/* MSR_IA32_VMX_MISC bits */
#define MSR_IA32_VMX_MISC_INTEL_PT	(1ULL << 14)
#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS	(1ULL << 29)
#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE	0x1F
/* AMD-V MSRs */

#define MSR_VM_CR			0xc0010114
#define MSR_VM_IGNNE			0xc0010115
#define MSR_VM_HSAVE_PA			0xc0010117

#endif /* _ASM_X86_MSR_INDEX_H */
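Since these are plain MSR numbers, they can also be exercised directly from user space through the msr driver, which is how tools like rdmsr work; a minimal sketch, assuming the msr kernel module is loaded and root privileges (the register choice is just an example):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MSR_IA32_TSC_ADJUST 0x0000003b /* from the header above */

    int main(void)
    {
    	uint64_t val;
    	/* each /dev/cpu/N/msr file exposes the MSRs of one CPU */
    	int fd = open("/dev/cpu/0/msr", O_RDONLY);

    	/* the MSR number doubles as the read offset */
    	if (fd < 0 || pread(fd, &val, sizeof(val), MSR_IA32_TSC_ADJUST) != sizeof(val)) {
    		perror("rdmsr");
    		return 1;
    	}
    	printf("IA32_TSC_ADJUST = %#jx\n", (uintmax_t)val);
    	close(fd);
    	return 0;
    }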
tools/perf/Documentation/perf-config.txt
@@ -561,6 +561,11 @@ trace.*::
 	trace.show_zeros::
 		Do not suppress syscall arguments that are equal to zero.
 
+	trace.tracepoint_beautifiers::
+		Use "libtraceevent" to use that library to augment the tracepoint arguments,
+		"libbeauty", the default, to use the same argument beautifiers used in the
+		strace-like sys_enter+sys_exit lines.
+
 llvm.*::
 	llvm.clang-path::
 		Path to clang. If omit, search it from $PATH.
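So flipping back to the old default is a one-line config change; a minimal ~/.perfconfig fragment using the value documented above:

    [trace]
    	tracepoint_beautifiers = libtraceevent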
tools/perf/Documentation/perf-diff.txt
@@ -95,6 +95,11 @@ OPTIONS
 	diff.compute config option. See COMPARISON METHODS section for
 	more info.
 
+--cycles-hist::
+	Report a histogram and the standard deviation for cycles data.
+	It can help us to judge if the reported cycles data is noisy or
+	not. This option should be used with '-c cycles'.
+
 -p::
 --period::
 	Show period values for both compared hist entries.
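A typical invocation combining this with '-c cycles', as the text above suggests (the data file names are the perf diff defaults):

    # perf diff -c cycles --cycles-hist perf.data.old perf.data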
tools/perf/Documentation/perf-trace.txt
@@ -42,6 +42,11 @@ OPTIONS
 	Prefixing with ! shows all syscalls but the ones specified.  You may
 	need to escape it.
 
+--filter=<filter>::
+	Event filter. This option should follow an event selector (-e) which
+	selects tracepoint event(s).
+
+
 -D msecs::
 --delay msecs::
 	After starting the program, wait msecs before measuring. This is useful to
@@ -219,6 +224,11 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
 	may happen, for instance, when a thread gets migrated to a different CPU
 	while processing a syscall.
 
+--libtraceevent_print::
+	Use libtraceevent to print tracepoint arguments. By default 'perf trace' uses
+	the same beautifiers used in the strace-like enter+exit lines to augment the
+	tracepoint arguments.
+
 --map-dump::
 	Dump BPF maps setup by events passed via -e, for instance the augmented_raw_syscalls
 	living in tools/perf/examples/bpf/augmented_raw_syscalls.c. For now this
tools/perf/Makefile.config
@@ -188,7 +188,7 @@ endif
 
 # Treat warnings as errors unless directed not to
 ifneq ($(WERROR),0)
-  CFLAGS += -Werror
+  CORE_CFLAGS += -Werror
   CXXFLAGS += -Werror
 endif
 
@@ -198,9 +198,9 @@ endif
 
 ifeq ($(DEBUG),0)
 ifeq ($(CC_NO_CLANG), 0)
-  CFLAGS += -O3
+  CORE_CFLAGS += -O3
 else
-  CFLAGS += -O6
+  CORE_CFLAGS += -O6
 endif
 endif
 
@@ -245,12 +245,12 @@ FEATURE_CHECK_LDFLAGS-libaio = -lrt
 
 FEATURE_CHECK_LDFLAGS-disassembler-four-args = -lbfd -lopcodes -ldl
 
-CFLAGS += -fno-omit-frame-pointer
-CFLAGS += -ggdb3
-CFLAGS += -funwind-tables
-CFLAGS += -Wall
-CFLAGS += -Wextra
-CFLAGS += -std=gnu99
+CORE_CFLAGS += -fno-omit-frame-pointer
+CORE_CFLAGS += -ggdb3
+CORE_CFLAGS += -funwind-tables
+CORE_CFLAGS += -Wall
+CORE_CFLAGS += -Wextra
+CORE_CFLAGS += -std=gnu99
 
 CXXFLAGS += -std=gnu++11 -fno-exceptions -fno-rtti
 CXXFLAGS += -Wall
@@ -272,12 +272,12 @@ include $(FEATURES_DUMP)
 endif
 
 ifeq ($(feature-stackprotector-all), 1)
-  CFLAGS += -fstack-protector-all
+  CORE_CFLAGS += -fstack-protector-all
 endif
 
 ifeq ($(DEBUG),0)
 ifeq ($(feature-fortify-source), 1)
-  CFLAGS += -D_FORTIFY_SOURCE=2
+  CORE_CFLAGS += -D_FORTIFY_SOURCE=2
 endif
 endif
 
@@ -301,10 +301,12 @@ INC_FLAGS += -I$(src-perf)/util
 INC_FLAGS += -I$(src-perf)
 INC_FLAGS += -I$(srctree)/tools/lib/
 
-CFLAGS += $(INC_FLAGS)
+CORE_CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
+
+CFLAGS += $(CORE_CFLAGS) $(INC_FLAGS)
 CXXFLAGS += $(INC_FLAGS)
 
-CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
+LIBPERF_CFLAGS := $(CORE_CFLAGS) $(EXTRA_CFLAGS)
 
 ifeq ($(feature-sync-compare-and-swap), 1)
   CFLAGS += -DHAVE_SYNC_COMPARE_AND_SWAP_SUPPORT
@@ -407,6 +407,7 @@ linux_uapi_dir := $(srctree)/tools/include/uapi/linux
asm_generic_uapi_dir := $(srctree)/tools/include/uapi/asm-generic
arch_asm_uapi_dir := $(srctree)/tools/arch/$(SRCARCH)/include/uapi/asm/
x86_arch_asm_uapi_dir := $(srctree)/tools/arch/x86/include/uapi/asm/
x86_arch_asm_dir := $(srctree)/tools/arch/x86/include/asm/

beauty_outdir := $(OUTPUT)trace/beauty/generated
beauty_ioctl_outdir := $(beauty_outdir)/ioctl
@@ -543,6 +544,12 @@ x86_arch_prctl_code_tbl := $(srctree)/tools/perf/trace/beauty/x86_arch_prctl.sh
$(x86_arch_prctl_code_array): $(x86_arch_asm_uapi_dir)/prctl.h $(x86_arch_prctl_code_tbl)
	$(Q)$(SHELL) '$(x86_arch_prctl_code_tbl)' $(x86_arch_asm_uapi_dir) > $@

x86_arch_MSRs_array := $(beauty_outdir)/x86_arch_MSRs_array.c
x86_arch_MSRs_tbl := $(srctree)/tools/perf/trace/beauty/tracepoints/x86_msr.sh

$(x86_arch_MSRs_array): $(x86_arch_asm_dir)/msr-index.h $(x86_arch_MSRs_tbl)
	$(Q)$(SHELL) '$(x86_arch_MSRs_tbl)' $(x86_arch_asm_dir) > $@

rename_flags_array := $(beauty_outdir)/rename_flags_array.c
rename_flags_tbl := $(srctree)/tools/perf/trace/beauty/rename_flags.sh

@@ -677,6 +684,7 @@ prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioc
	$(perf_ioctl_array) \
	$(prctl_option_array) \
	$(usbdevfs_ioctl_array) \
	$(x86_arch_MSRs_array) \
	$(x86_arch_prctl_code_array) \
	$(rename_flags_array) \
	$(arch_errno_name_array) \
@@ -761,7 +769,7 @@ $(LIBBPF)-clean:
	$(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) clean >/dev/null

$(LIBPERF): FORCE
	$(Q)$(MAKE) -C $(LIBPERF_DIR) O=$(OUTPUT) $(OUTPUT)libperf.a
	$(Q)$(MAKE) -C $(LIBPERF_DIR) EXTRA_CFLAGS="$(LIBPERF_CFLAGS)" O=$(OUTPUT) $(OUTPUT)libperf.a

$(LIBPERF)-clean:
	$(call QUIET_CLEAN, libperf)
@@ -981,6 +989,7 @@ clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clea
	$(OUTPUT)$(perf_ioctl_array) \
	$(OUTPUT)$(prctl_option_array) \
	$(OUTPUT)$(usbdevfs_ioctl_array) \
	$(OUTPUT)$(x86_arch_MSRs_array) \
	$(OUTPUT)$(x86_arch_prctl_code_array) \
	$(OUTPUT)$(rename_flags_array) \
	$(OUTPUT)$(arch_errno_name_array) \
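The x86_msr.sh generator above scrapes msr-index.h into a C string table keyed by MSR number, which the msr:{read,write}_msr beautifier then indexes. The exact layout is whatever the script emits; this is a purely illustrative sketch of the generated x86_arch_MSRs_array.c shape, with entry values copied from msr-index.h:

static const char * const x86_MSRs[] = {
	[0x00000000] = "IA32_P5_MC_ADDR",	/* from MSR_IA32_P5_MC_ADDR */
	[0x00000010] = "IA32_TSC",		/* from MSR_IA32_TSC */
	[0x0000001b] = "IA32_APICBASE",		/* from MSR_IA32_APICBASE */
};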
@@ -1,3 +1,5 @@
perf-y += perf_regs.o

perf-$(CONFIG_DWARF) += dwarf-regs.o

perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o

tools/perf/arch/arm/util/perf_regs.c (new file)
@@ -0,0 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include "../../util/perf_regs.h"

const struct sample_reg sample_reg_masks[] = {
	SMPL_REG_END
};
@@ -1,4 +1,5 @@
perf-y += header.o
perf-y += perf_regs.o
perf-y += sym-handling.o
perf-$(CONFIG_DWARF) += dwarf-regs.o
perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o

tools/perf/arch/arm64/util/perf_regs.c (new file)
@@ -0,0 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include "../../util/perf_regs.h"

const struct sample_reg sample_reg_masks[] = {
	SMPL_REG_END
};
@@ -1,2 +1,4 @@
perf-y += perf_regs.o

perf-$(CONFIG_DWARF) += dwarf-regs.o
perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o

tools/perf/arch/csky/util/perf_regs.c (new file)
@@ -0,0 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include "../../util/perf_regs.h"

const struct sample_reg sample_reg_masks[] = {
	SMPL_REG_END
};
@@ -1,2 +1,4 @@
perf-y += perf_regs.o

perf-$(CONFIG_DWARF) += dwarf-regs.o
perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o

tools/perf/arch/riscv/util/perf_regs.c (new file)
@@ -0,0 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include "../../util/perf_regs.h"

const struct sample_reg sample_reg_masks[] = {
	SMPL_REG_END
};
@@ -1,5 +1,6 @@
perf-y += header.o
perf-y += kvm-stat.o
perf-y += perf_regs.o

perf-$(CONFIG_DWARF) += dwarf-regs.o
perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o

tools/perf/arch/s390/util/perf_regs.c (new file)
@@ -0,0 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include "../../util/perf_regs.h"

const struct sample_reg sample_reg_masks[] = {
	SMPL_REG_END
};
@@ -9,6 +9,7 @@
#include <sys/prctl.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
#include <perf/mmap.h>

#include "debug.h"
#include "parse-events.h"
@@ -117,10 +118,10 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		md = &evlist->mmap[i];
		if (perf_mmap__read_init(md) < 0)
		if (perf_mmap__read_init(&md->core) < 0)
			continue;

		while ((event = perf_mmap__read_event(md)) != NULL) {
		while ((event = perf_mmap__read_event(&md->core)) != NULL) {
			struct perf_sample sample;

			if (event->header.type != PERF_RECORD_COMM ||
@@ -139,9 +140,9 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
				comm2_time = sample.time;
		}
next_event:
		perf_mmap__consume(md);
		perf_mmap__consume(&md->core);
	}
	perf_mmap__read_done(md);
	perf_mmap__read_done(&md->core);
	}

	if (!comm1_time || !comm2_time)
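The same mechanical change repeats in the hunks that follow: the tool-side struct mmap embeds a struct perf_mmap as ->core, and the perf_mmap__*() API now operates on that embedded object. A minimal sketch of the read-loop idiom these call sites converge on, with perf-internal types assumed and error handling elided:

static void drain_one_mmap(struct mmap *md)
{
	union perf_event *event;

	if (perf_mmap__read_init(&md->core) < 0)
		return;				/* nothing new in this ring buffer */

	while ((event = perf_mmap__read_event(&md->core)) != NULL) {
		/* ... process 'event' ... */
		perf_mmap__consume(&md->core);	/* mark the event as handled */
	}

	perf_mmap__read_done(&md->core);	/* close out this read pass */
}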
@@ -23,6 +23,7 @@
#include "util/time-utils.h"
#include "util/annotate.h"
#include "util/map.h"
#include "util/spark.h"
#include <linux/err.h>
#include <linux/zalloc.h>
#include <subcmd/pager.h>
@@ -53,6 +54,7 @@ enum {
	PERF_HPP_DIFF__FORMULA,
	PERF_HPP_DIFF__DELTA_ABS,
	PERF_HPP_DIFF__CYCLES,
	PERF_HPP_DIFF__CYCLES_HIST,

	PERF_HPP_DIFF__MAX_INDEX
};
@@ -87,6 +89,7 @@ static bool force;
static bool show_period;
static bool show_formula;
static bool show_baseline_only;
static bool cycles_hist;
static unsigned int sort_compute = 1;

static s64 compute_wdiff_w1;
@@ -164,6 +167,10 @@ static struct header_column {
	[PERF_HPP_DIFF__CYCLES] = {
		.name  = "[Program Block Range] Cycles Diff",
		.width = 70,
	},
	[PERF_HPP_DIFF__CYCLES_HIST] = {
		.name  = "stddev/Hist",
		.width = NUM_SPARKS + 9,
	}
};

@@ -610,6 +617,9 @@ static void init_block_info(struct block_info *bi, struct symbol *sym,
	bi->cycles_aggr = ch->cycles_aggr;
	bi->num = ch->num;
	bi->num_aggr = ch->num_aggr;

	memcpy(bi->cycles_spark, ch->cycles_spark,
	       NUM_SPARKS * sizeof(u64));
}

static int process_block_per_sym(struct hist_entry *he)
@@ -689,6 +699,21 @@ static struct hist_entry *get_block_pair(struct hist_entry *he,
	return NULL;
}

static void init_spark_values(unsigned long *svals, int num)
{
	for (int i = 0; i < num; i++)
		svals[i] = 0;
}

static void update_spark_value(unsigned long *svals, int num,
			       struct stats *stats, u64 val)
{
	int n = stats->n;

	if (n < num)
		svals[n] = val;
}

static void compute_cycles_diff(struct hist_entry *he,
				struct hist_entry *pair)
{
@@ -697,6 +722,26 @@ static void compute_cycles_diff(struct hist_entry *he,
		pair->diff.cycles =
			pair->block_info->cycles_aggr / pair->block_info->num_aggr -
			he->block_info->cycles_aggr / he->block_info->num_aggr;

		if (!cycles_hist)
			return;

		init_stats(&pair->diff.stats);
		init_spark_values(pair->diff.svals, NUM_SPARKS);

		for (int i = 0; i < pair->block_info->num; i++) {
			u64 val;

			if (i >= he->block_info->num || i >= NUM_SPARKS)
				break;

			val = labs(pair->block_info->cycles_spark[i] -
				   he->block_info->cycles_spark[i]);

			update_spark_value(pair->diff.svals, NUM_SPARKS,
					   &pair->diff.stats, val);
			update_stats(&pair->diff.stats, val);
		}
	}
}

@@ -1255,6 +1300,9 @@ static const struct option options[] = {
		    "Show period values."),
	OPT_BOOLEAN('F', "formula", &show_formula,
		    "Show formula."),
	OPT_BOOLEAN(0, "cycles-hist", &cycles_hist,
		    "Show cycles histogram and standard deviation "
		    "- WARNING: use only with -c cycles."),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
@@ -1462,6 +1510,90 @@ static int hpp__color_cycles(struct perf_hpp_fmt *fmt,
	return __hpp__color_compare(fmt, hpp, he, COMPUTE_CYCLES);
}

static int all_zero(unsigned long *vals, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (vals[i] != 0)
			return 0;
	return 1;
}

static int print_cycles_spark(char *bf, int size, unsigned long *svals, u64 n)
{
	int printed;

	if (n <= 1)
		return 0;

	if (n > NUM_SPARKS)
		n = NUM_SPARKS;
	if (all_zero(svals, n))
		return 0;

	printed = print_spark(bf, size, svals, n);
	printed += scnprintf(bf + printed, size - printed, " ");
	return printed;
}

static int hpp__color_cycles_hist(struct perf_hpp_fmt *fmt,
				  struct perf_hpp *hpp, struct hist_entry *he)
{
	struct diff_hpp_fmt *dfmt =
		container_of(fmt, struct diff_hpp_fmt, fmt);
	struct hist_entry *pair = get_pair_fmt(he, dfmt);
	struct block_hist *bh = container_of(he, struct block_hist, he);
	struct block_hist *bh_pair;
	struct hist_entry *block_he;
	char spark[32], buf[128];
	double r;
	int ret, pad;

	if (!pair) {
		if (bh->block_idx)
			hpp->skip = true;

		goto no_print;
	}

	bh_pair = container_of(pair, struct block_hist, he);

	block_he = hists__get_entry(&bh_pair->block_hists, bh->block_idx);
	if (!block_he) {
		hpp->skip = true;
		goto no_print;
	}

	ret = print_cycles_spark(spark, sizeof(spark), block_he->diff.svals,
				 block_he->diff.stats.n);

	r = rel_stddev_stats(stddev_stats(&block_he->diff.stats),
			     avg_stats(&block_he->diff.stats));

	if (ret) {
		/*
		 * Pad with spaces if the number of sparks is less than
		 * NUM_SPARKS, otherwise the output is not aligned.
		 */
		pad = NUM_SPARKS - ((ret - 1) / 3);
		scnprintf(buf, sizeof(buf), "%s%5.1f%% %s", "\u00B1", r, spark);
		ret = scnprintf(hpp->buf, hpp->size, "%*s",
				dfmt->header_width, buf);

		if (pad) {
			ret += scnprintf(hpp->buf + ret, hpp->size - ret,
					 "%-*s", pad, " ");
		}

		return ret;
	}

no_print:
	return scnprintf(hpp->buf, hpp->size, "%*s",
			 dfmt->header_width, " ");
}

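The '±' figure printed above is the relative standard deviation of the per-block cycle diffs; rel_stddev_stats() in perf's stats helpers boils down, as far as this reading goes, to 100 * stddev / mean. A hedged one-liner equivalent:

/* Relative stddev in percent, guarding against a zero mean. */
static double rel_stddev(double stddev, double avg)
{
	return avg ? 100.0 * (stddev / avg) : 0.0;
}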
static void
hpp__entry_unpair(struct hist_entry *he, int idx, char *buf, size_t size)
{
@@ -1667,6 +1799,10 @@ static void data__hpp_register(struct data__file *d, int idx)
		fmt->color = hpp__color_cycles;
		fmt->sort  = hist_entry__cmp_nop;
		break;
	case PERF_HPP_DIFF__CYCLES_HIST:
		fmt->color = hpp__color_cycles_hist;
		fmt->sort  = hist_entry__cmp_nop;
		break;
	default:
		fmt->sort  = hist_entry__cmp_nop;
		break;
@@ -1692,10 +1828,14 @@ static int ui_init(void)
		 *   PERF_HPP_DIFF__DELTA
		 *   PERF_HPP_DIFF__RATIO
		 *   PERF_HPP_DIFF__WEIGHTED_DIFF
		 *   PERF_HPP_DIFF__CYCLES
		 */
		data__hpp_register(d, i ? compute_2_hpp[compute] :
					  PERF_HPP_DIFF__BASELINE);

		if (cycles_hist && i)
			data__hpp_register(d, PERF_HPP_DIFF__CYCLES_HIST);

		/*
		 * And the rest:
		 *
@@ -1850,6 +1990,9 @@ int cmd_diff(int argc, const char **argv)
	if (quiet)
		perf_quiet_option();

	if (cycles_hist && (compute != COMPUTE_CYCLES))
		usage_with_options(diff_usage, options);

	symbol__annotation_init();

	if (symbol__init(NULL) < 0)
@@ -46,6 +46,7 @@
#include <semaphore.h>
#include <signal.h>
#include <math.h>
#include <perf/mmap.h>

static const char *get_filename_for_perf_kvm(void)
{
@@ -759,14 +760,14 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,

	*mmap_time = ULLONG_MAX;
	md = &evlist->mmap[idx];
	err = perf_mmap__read_init(md);
	err = perf_mmap__read_init(&md->core);
	if (err < 0)
		return (err == -EAGAIN) ? 0 : -1;

	while ((event = perf_mmap__read_event(md)) != NULL) {
	while ((event = perf_mmap__read_event(&md->core)) != NULL) {
		err = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
		if (err) {
			perf_mmap__consume(md);
			perf_mmap__consume(&md->core);
			pr_err("Failed to parse sample\n");
			return -1;
		}
@@ -776,7 +777,7 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
		 * FIXME: Here we can't consume the event, as perf_session__queue_event will
		 *        point to it, and it'll get possibly overwritten by the kernel.
		 */
		perf_mmap__consume(md);
		perf_mmap__consume(&md->core);

		if (err) {
			pr_err("Failed to enqueue sample: %d\n", err);
@@ -793,7 +794,7 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
			break;
	}

	perf_mmap__read_done(md);
	perf_mmap__read_done(&md->core);
	return n;
}

@@ -197,7 +197,7 @@ static int record__aio_complete(struct mmap *md, struct aiocb *cblock)
		 * every aio write request started in record__aio_push() so
		 * decrement it because the request is now complete.
		 */
		perf_mmap__put(md);
		perf_mmap__put(&md->core);
		rc = 1;
	} else {
		/*
@@ -276,7 +276,7 @@ static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size

	if (record__comp_enabled(aio->rec)) {
		size = zstd_compress(aio->rec->session, aio->data + aio->size,
				     perf_mmap__mmap_len(map) - aio->size,
				     mmap__mmap_len(map) - aio->size,
				     buf, size);
	} else {
		memcpy(aio->data + aio->size, buf, size);
@@ -293,7 +293,7 @@ static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size
	 * after started aio request completion or at record__aio_push()
	 * if the request failed to start.
	 */
	perf_mmap__get(map);
	perf_mmap__get(&map->core);
	}

	aio->size += size;
@@ -332,7 +332,7 @@ static int record__aio_push(struct record *rec, struct mmap *map, off_t *off)
		 * map->refcount is decremented in record__aio_complete() after
		 * aio write operation finishes successfully.
		 */
		perf_mmap__put(map);
		perf_mmap__put(&map->core);
	}

	return ret;
@@ -488,7 +488,7 @@ static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
	struct record *rec = to;

	if (record__comp_enabled(rec)) {
		size = zstd_compress(rec->session, map->data, perf_mmap__mmap_len(map), bf, size);
		size = zstd_compress(rec->session, map->data, mmap__mmap_len(map), bf, size);
		bf = map->data;
	}

@@ -3605,11 +3605,6 @@ int cmd_script(int argc, const char **argv)
		}
	}

	if (script.time_str && reltime) {
		fprintf(stderr, "Don't combine --reltime with --time\n");
		return -1;
	}

	if (itrace_synth_opts.callchain &&
	    itrace_synth_opts.callchain_sz > scripting_max_stack)
		scripting_max_stack = itrace_synth_opts.callchain_sz;
@@ -82,6 +82,7 @@
#include <linux/err.h>

#include <linux/ctype.h>
#include <perf/mmap.h>

static volatile int done;
static volatile int resize;
@@ -869,10 +870,10 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
	union perf_event *event;

	md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
	if (perf_mmap__read_init(md) < 0)
	if (perf_mmap__read_init(&md->core) < 0)
		return;

	while ((event = perf_mmap__read_event(md)) != NULL) {
	while ((event = perf_mmap__read_event(&md->core)) != NULL) {
		int ret;

		ret = perf_evlist__parse_sample_timestamp(evlist, event, &last_timestamp);
@@ -883,7 +884,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
		if (ret)
			break;

		perf_mmap__consume(md);
		perf_mmap__consume(&md->core);

		if (top->qe.rotate) {
			pthread_mutex_lock(&top->qe.mutex);
@@ -893,7 +894,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
		}
	}

	perf_mmap__read_done(md);
	perf_mmap__read_done(&md->core);
}

static void perf_top__mmap_read(struct perf_top *top)
@@ -1560,6 +1561,17 @@ int cmd_top(int argc, const char **argv)
	status = perf_config(perf_top_config, &top);
	if (status)
		return status;
	/*
	 * Since the per arch annotation init routine may need the cpuid, read
	 * it here, since we are not getting this from the perf.data header.
	 */
	status = perf_env__read_cpuid(&perf_env);
	if (status) {
		pr_err("Couldn't read the cpuid for this machine: %s\n",
		       str_error_r(errno, errbuf, sizeof(errbuf)));
		goto out_delete_evlist;
	}
	top.evlist->env = &perf_env;

	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
@@ -77,6 +77,7 @@
#include <sys/sysmacros.h>

#include <linux/ctype.h>
#include <perf/mmap.h>

#ifndef O_CLOEXEC
# define O_CLOEXEC		02000000
@@ -86,6 +87,33 @@
# define F_LINUX_SPECIFIC_BASE	1024
#endif

/*
 * strtoul: Go from a string to a value, e.g. for msr: MSR_FS_BASE to 0xc0000100
 */
struct syscall_arg_fmt {
	size_t	   (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	bool	   (*strtoul)(char *bf, size_t size, struct syscall_arg *arg, u64 *val);
	unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
	void	   *parm;
	const char *name;
	u16	   nr_entries; // for arrays
	bool	   show_zero;
};

struct syscall_fmt {
	const char *name;
	const char *alias;
	struct {
		const char *sys_enter,
			   *sys_exit;
	} bpf_prog_name;
	struct syscall_arg_fmt arg[6];
	u8	   nr_args;
	bool	   errpid;
	bool	   timeout;
	bool	   hexret;
};

struct trace {
	struct perf_tool	tool;
	struct syscalltbl	*sctbl;
@@ -152,6 +180,7 @@ struct trace {
	bool			print_sample;
	bool			show_tool_stats;
	bool			trace_syscalls;
	bool			libtraceevent_print;
	bool			kernel_syscallchains;
	s16			args_alignment;
	bool			show_tstamp;
@@ -162,6 +191,7 @@ struct trace {
	bool			force;
	bool			vfs_getname;
	int			trace_pgfaults;
	char			*perfconfig_events;
	struct {
		struct ordered_events	data;
		u64			last;
@@ -448,6 +478,34 @@ size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const
	return printed;
}

bool strarray__strtoul(struct strarray *sa, char *bf, size_t size, u64 *ret)
{
	int i;

	for (i = 0; i < sa->nr_entries; ++i) {
		if (sa->entries[i] && strncmp(sa->entries[i], bf, size) == 0 && sa->entries[i][size] == '\0') {
			*ret = sa->offset + i;
			return true;
		}
	}

	return false;
}

bool strarrays__strtoul(struct strarrays *sas, char *bf, size_t size, u64 *ret)
{
	int i;

	for (i = 0; i < sas->nr_entries; ++i) {
		struct strarray *sa = sas->entries[i];

		if (strarray__strtoul(sa, bf, size, ret))
			return true;
	}

	return false;
}

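A hedged usage sketch of the resolver above. strarray__bpf_cmd is assumed to be the DEFINE_STRARRAY(bpf_cmd, "BPF_") table built from the bpf_cmd[] strings further down, so its first entry, "MAP_CREATE", resolves to value 0:

	u64 val;
	const char *tok = "MAP_CREATE";	/* assumed table name and entry */

	if (strarray__strtoul(&strarray__bpf_cmd, (char *)tok, strlen(tok), &val))
		printf("%s -> %#" PRIx64 "\n", tok, val);	/* MAP_CREATE -> 0 */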
size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
					struct syscall_arg *arg)
{
@@ -499,6 +557,16 @@ size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *ar
	return scnprintf(bf, size, "%ld", arg->val);
}

static size_t syscall_arg__scnprintf_char_array(char *bf, size_t size, struct syscall_arg *arg)
{
	// XXX Hey, maybe for sched:sched_switch prev/next comm fields we can
	//     fill missing comms using thread__set_comm()...
	//     here or in a special syscall_arg__scnprintf_pid_sched_tp...
	return scnprintf(bf, size, "\"%-.*s\"", arg->fmt->nr_entries, arg->val);
}

#define SCA_CHAR_ARRAY syscall_arg__scnprintf_char_array

static const char *bpf_cmd[] = {
	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
	"MAP_GET_NEXT_KEY", "PROG_LOAD",
@@ -694,27 +762,7 @@ static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
#include "trace/beauty/socket_type.c"
#include "trace/beauty/waitid_options.c"

struct syscall_arg_fmt {
	size_t	   (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
	void	   *parm;
	const char *name;
	bool	   show_zero;
};

static struct syscall_fmt {
	const char *name;
	const char *alias;
	struct {
		const char *sys_enter,
			   *sys_exit;
	} bpf_prog_name;
	struct syscall_arg_fmt arg[6];
	u8	   nr_args;
	bool	   errpid;
	bool	   timeout;
	bool	   hexret;
} syscall_fmts[] = {
static struct syscall_fmt syscall_fmts[] = {
	{ .name	    = "access",
	  .arg = { [1] = { .scnprintf = SCA_ACCMODE,  /* mode */ }, }, },
	{ .name	    = "arch_prctl",
@@ -964,22 +1012,33 @@ static int syscall_fmt__cmp(const void *name, const void *fmtp)
	return strcmp(name, fmt->name);
}

static struct syscall_fmt *__syscall_fmt__find(struct syscall_fmt *fmts, const int nmemb, const char *name)
{
	return bsearch(name, fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
}

static struct syscall_fmt *syscall_fmt__find(const char *name)
{
	const int nmemb = ARRAY_SIZE(syscall_fmts);
	return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
	return __syscall_fmt__find(syscall_fmts, nmemb, name);
}

static struct syscall_fmt *__syscall_fmt__find_by_alias(struct syscall_fmt *fmts, const int nmemb, const char *alias)
{
	int i;

	for (i = 0; i < nmemb; ++i) {
		if (fmts[i].alias && strcmp(fmts[i].alias, alias) == 0)
			return &fmts[i];
	}

	return NULL;
}

static struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
{
	int i, nmemb = ARRAY_SIZE(syscall_fmts);

	for (i = 0; i < nmemb; ++i) {
		if (syscall_fmts[i].alias && strcmp(syscall_fmts[i].alias, alias) == 0)
			return &syscall_fmts[i];
	}

	return NULL;
	const int nmemb = ARRAY_SIZE(syscall_fmts);
	return __syscall_fmt__find_by_alias(syscall_fmts, nmemb, alias);
}

/*
@@ -1453,15 +1512,38 @@ static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
	return 0;
}

static int syscall__set_arg_fmts(struct syscall *sc)
{
	struct tep_format_field *field, *last_field = NULL;
	int idx = 0, len;
static struct syscall_arg_fmt syscall_arg_fmts__by_name[] = {
	{ .name = "msr", .scnprintf = SCA_X86_MSR, .strtoul = STUL_X86_MSR, }
};

	for (field = sc->args; field; field = field->next, ++idx) {
static int syscall_arg_fmt__cmp(const void *name, const void *fmtp)
{
	const struct syscall_arg_fmt *fmt = fmtp;
	return strcmp(name, fmt->name);
}

static struct syscall_arg_fmt *
__syscall_arg_fmt__find_by_name(struct syscall_arg_fmt *fmts, const int nmemb, const char *name)
{
	return bsearch(name, fmts, nmemb, sizeof(struct syscall_arg_fmt), syscall_arg_fmt__cmp);
}

static struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *name)
{
	const int nmemb = ARRAY_SIZE(syscall_arg_fmts__by_name);
	return __syscall_arg_fmt__find_by_name(syscall_arg_fmts__by_name, nmemb, name);
}

static struct tep_format_field *
syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field)
{
	struct tep_format_field *last_field = NULL;
	int len;

	for (; field; field = field->next, ++arg) {
		last_field = field;

		if (sc->fmt && sc->fmt->arg[idx].scnprintf)
		if (arg->scnprintf)
			continue;

		len = strlen(field->name);
@@ -1469,14 +1551,17 @@ static int syscall__set_arg_fmts(struct syscall *sc)
		if (strcmp(field->type, "const char *") == 0 &&
		    ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
		     strstr(field->name, "path") != NULL))
			sc->arg_fmt[idx].scnprintf = SCA_FILENAME;
			arg->scnprintf = SCA_FILENAME;
		else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
			sc->arg_fmt[idx].scnprintf = SCA_PTR;
			arg->scnprintf = SCA_PTR;
		else if (strcmp(field->type, "pid_t") == 0)
			sc->arg_fmt[idx].scnprintf = SCA_PID;
			arg->scnprintf = SCA_PID;
		else if (strcmp(field->type, "umode_t") == 0)
			sc->arg_fmt[idx].scnprintf = SCA_MODE_T;
		else if ((strcmp(field->type, "int") == 0 ||
			arg->scnprintf = SCA_MODE_T;
		else if ((field->flags & TEP_FIELD_IS_ARRAY) && strstarts(field->type, "char")) {
			arg->scnprintf = SCA_CHAR_ARRAY;
			arg->nr_entries = field->arraylen;
		} else if ((strcmp(field->type, "int") == 0 ||
			  strcmp(field->type, "unsigned int") == 0 ||
			  strcmp(field->type, "long") == 0) &&
			  len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
@@ -1487,10 +1572,24 @@ static int syscall__set_arg_fmts(struct syscall *sc)
			 *	   23 unsigned int
			 *	   7 unsigned long
			 */
			sc->arg_fmt[idx].scnprintf = SCA_FD;
			arg->scnprintf = SCA_FD;
		} else {
			struct syscall_arg_fmt *fmt = syscall_arg_fmt__find_by_name(field->name);

			if (fmt) {
				arg->scnprintf = fmt->scnprintf;
				arg->strtoul   = fmt->strtoul;
			}
		}
	}

	return last_field;
}

static int syscall__set_arg_fmts(struct syscall *sc)
{
	struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args);

	if (last_field)
		sc->args_size = last_field->offset + last_field->size;

@@ -1552,6 +1651,19 @@ static int trace__read_syscall_info(struct trace *trace, int id)
	return syscall__set_arg_fmts(sc);
}

static int perf_evsel__init_tp_arg_scnprintf(struct evsel *evsel)
{
	int nr_args = evsel->tp_format->format.nr_fields;

	evsel->priv = calloc(nr_args, sizeof(struct syscall_arg_fmt));
	if (evsel->priv != NULL) {
		syscall_arg_fmt__init_array(evsel->priv, evsel->tp_format->format.fields);
		return 0;
	}

	return -ENOMEM;
}

static int intcmp(const void *a, const void *b)
{
	const int *one = a, *another = b;
@@ -1680,22 +1792,22 @@ static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
 * as mount 'flags' argument that needs ignoring some magic flag, see comment
 * in tools/perf/trace/beauty/mount_flags.c
 */
static unsigned long syscall__mask_val(struct syscall *sc, struct syscall_arg *arg, unsigned long val)
static unsigned long syscall_arg_fmt__mask_val(struct syscall_arg_fmt *fmt, struct syscall_arg *arg, unsigned long val)
{
	if (sc->arg_fmt && sc->arg_fmt[arg->idx].mask_val)
		return sc->arg_fmt[arg->idx].mask_val(arg, val);
	if (fmt && fmt->mask_val)
		return fmt->mask_val(arg, val);

	return val;
}

static size_t syscall__scnprintf_val(struct syscall *sc, char *bf, size_t size,
				     struct syscall_arg *arg, unsigned long val)
static size_t syscall_arg_fmt__scnprintf_val(struct syscall_arg_fmt *fmt, char *bf, size_t size,
					     struct syscall_arg *arg, unsigned long val)
{
	if (sc->arg_fmt && sc->arg_fmt[arg->idx].scnprintf) {
	if (fmt && fmt->scnprintf) {
		arg->val = val;
		if (sc->arg_fmt[arg->idx].parm)
			arg->parm = sc->arg_fmt[arg->idx].parm;
		return sc->arg_fmt[arg->idx].scnprintf(bf, size, arg);
		if (fmt->parm)
			arg->parm = fmt->parm;
		return fmt->scnprintf(bf, size, arg);
	}
	return scnprintf(bf, size, "%ld", val);
}
@@ -1736,12 +1848,13 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
			if (arg.mask & bit)
				continue;

			arg.fmt = &sc->arg_fmt[arg.idx];
			val = syscall_arg__val(&arg, arg.idx);
			/*
			 * Some syscall args need some mask, most don't and
			 * return val untouched.
			 */
			val = syscall__mask_val(sc, &arg, val);
			val = syscall_arg_fmt__mask_val(&sc->arg_fmt[arg.idx], &arg, val);

			/*
			 * Suppress this argument if its value is zero and
@@ -1762,7 +1875,8 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
			if (trace->show_arg_names)
				printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);

			printed += syscall__scnprintf_val(sc, bf + printed, size - printed, &arg, val);
			printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx],
								  bf + printed, size - printed, &arg, val);
		}
	} else if (IS_ERR(sc->tp_format)) {
		/*
@@ -1777,7 +1891,7 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
			if (printed)
				printed += scnprintf(bf + printed, size - printed, ", ");
			printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg);
			printed += syscall__scnprintf_val(sc, bf + printed, size - printed, &arg, val);
			printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], bf + printed, size - printed, &arg, val);
next_arg:
			++arg.idx;
			bit <<= 1;
|
||||
++trace->nr_events_printed;
|
||||
}
|
||||
|
||||
static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample *sample,
|
||||
struct thread *thread, void *augmented_args, int augmented_args_size)
|
||||
{
|
||||
char bf[2048];
|
||||
size_t size = sizeof(bf);
|
||||
struct tep_format_field *field = evsel->tp_format->format.fields;
|
||||
struct syscall_arg_fmt *arg = evsel->priv;
|
||||
size_t printed = 0;
|
||||
unsigned long val;
|
||||
u8 bit = 1;
|
||||
struct syscall_arg syscall_arg = {
|
||||
.augmented = {
|
||||
.size = augmented_args_size,
|
||||
.args = augmented_args,
|
||||
},
|
||||
.idx = 0,
|
||||
.mask = 0,
|
||||
.trace = trace,
|
||||
.thread = thread,
|
||||
.show_string_prefix = trace->show_string_prefix,
|
||||
};
|
||||
|
||||
for (; field && arg; field = field->next, ++syscall_arg.idx, bit <<= 1, ++arg) {
|
||||
if (syscall_arg.mask & bit)
|
||||
continue;
|
||||
|
||||
syscall_arg.fmt = arg;
|
||||
if (field->flags & TEP_FIELD_IS_ARRAY)
|
||||
val = (uintptr_t)(sample->raw_data + field->offset);
|
||||
else
|
||||
val = format_field__intval(field, sample, evsel->needs_swap);
|
||||
/*
|
||||
* Some syscall args need some mask, most don't and
|
||||
* return val untouched.
|
||||
*/
|
||||
val = syscall_arg_fmt__mask_val(arg, &syscall_arg, val);
|
||||
|
||||
/*
|
||||
* Suppress this argument if its value is zero and
|
||||
* and we don't have a string associated in an
|
||||
* strarray for it.
|
||||
*/
|
||||
if (val == 0 &&
|
||||
!trace->show_zeros &&
|
||||
!((arg->show_zero ||
|
||||
arg->scnprintf == SCA_STRARRAY ||
|
||||
arg->scnprintf == SCA_STRARRAYS) &&
|
||||
arg->parm))
|
||||
continue;
|
||||
|
||||
printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
|
||||
|
||||
/*
|
||||
* XXX Perhaps we should have a show_tp_arg_names,
|
||||
* leaving show_arg_names just for syscalls?
|
||||
*/
|
||||
if (1 || trace->show_arg_names)
|
||||
printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
|
||||
|
||||
printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val);
|
||||
}
|
||||
|
||||
return printed + fprintf(trace->output, "%s", bf);
|
||||
}
|
||||
|
||||
static int trace__event_handler(struct trace *trace, struct evsel *evsel,
|
||||
union perf_event *event __maybe_unused,
|
||||
struct perf_sample *sample)
|
||||
@ -2399,16 +2578,20 @@ static int trace__event_handler(struct trace *trace, struct evsel *evsel,
|
||||
*/
|
||||
}
|
||||
|
||||
fprintf(trace->output, "%s:", evsel->name);
|
||||
fprintf(trace->output, "%s(", evsel->name);
|
||||
|
||||
if (perf_evsel__is_bpf_output(evsel)) {
|
||||
bpf_output__fprintf(trace, sample);
|
||||
} else if (evsel->tp_format) {
|
||||
if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
|
||||
trace__fprintf_sys_enter(trace, evsel, sample)) {
|
||||
event_format__fprintf(evsel->tp_format, sample->cpu,
|
||||
sample->raw_data, sample->raw_size,
|
||||
trace->output);
|
||||
if (trace->libtraceevent_print) {
|
||||
event_format__fprintf(evsel->tp_format, sample->cpu,
|
||||
sample->raw_data, sample->raw_size,
|
||||
trace->output);
|
||||
} else {
|
||||
trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0);
|
||||
}
|
||||
++trace->nr_events_printed;
|
||||
|
||||
if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
|
||||
@ -2419,7 +2602,7 @@ static int trace__event_handler(struct trace *trace, struct evsel *evsel,
|
||||
}
|
||||
|
||||
newline:
|
||||
fprintf(trace->output, "\n");
|
||||
fprintf(trace->output, ")\n");
|
||||
|
||||
if (callchain_ret > 0)
|
||||
trace__fprintf_callchain(trace, sample);
|
||||
@ -3103,7 +3286,27 @@ static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
|
||||
|
||||
return err;
|
||||
}
|
||||
#else
|
||||
|
||||
static void trace__delete_augmented_syscalls(struct trace *trace)
|
||||
{
|
||||
struct evsel *evsel, *tmp;
|
||||
|
||||
evlist__remove(trace->evlist, trace->syscalls.events.augmented);
|
||||
evsel__delete(trace->syscalls.events.augmented);
|
||||
trace->syscalls.events.augmented = NULL;
|
||||
|
||||
evlist__for_each_entry_safe(trace->evlist, tmp, evsel) {
|
||||
if (evsel->bpf_obj == trace->bpf_obj) {
|
||||
evlist__remove(trace->evlist, evsel);
|
||||
evsel__delete(evsel);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
bpf_object__close(trace->bpf_obj);
|
||||
trace->bpf_obj = NULL;
|
||||
}
|
||||
#else // HAVE_LIBBPF_SUPPORT
|
||||
static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
|
||||
{
|
||||
return 0;
|
||||
@ -3124,8 +3327,27 @@ static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void trace__delete_augmented_syscalls(struct trace *trace __maybe_unused)
|
||||
{
|
||||
}
|
||||
#endif // HAVE_LIBBPF_SUPPORT
|
||||
|
||||
static bool trace__only_augmented_syscalls_evsels(struct trace *trace)
|
||||
{
|
||||
struct evsel *evsel;
|
||||
|
||||
evlist__for_each_entry(trace->evlist, evsel) {
|
||||
if (evsel == trace->syscalls.events.augmented ||
|
||||
evsel->bpf_obj == trace->bpf_obj)
|
||||
continue;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static int trace__set_ev_qualifier_filter(struct trace *trace)
|
||||
{
|
||||
if (trace->syscalls.map)
|
||||
@ -3175,7 +3397,7 @@ static int trace__set_filter_loop_pids(struct trace *trace)
|
||||
thread = parent;
|
||||
}
|
||||
|
||||
err = perf_evlist__set_tp_filter_pids(trace->evlist, nr, pids);
|
||||
err = perf_evlist__append_tp_filter_pids(trace->evlist, nr, pids);
|
||||
if (!err && trace->filter_pids.map)
|
||||
err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
|
||||
|
||||
@ -3192,8 +3414,8 @@ static int trace__set_filter_pids(struct trace *trace)
|
||||
* we fork the workload in perf_evlist__prepare_workload.
|
||||
*/
|
||||
if (trace->filter_pids.nr > 0) {
|
||||
err = perf_evlist__set_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
|
||||
trace->filter_pids.entries);
|
||||
err = perf_evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
|
||||
trace->filter_pids.entries);
|
||||
if (!err && trace->filter_pids.map) {
|
||||
err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
|
||||
trace->filter_pids.entries);
|
||||
@@ -3263,6 +3485,133 @@ static int ordered_events__deliver_event(struct ordered_events *oe,
	return __trace__deliver_event(trace, event->event);
}

static struct syscall_arg_fmt *perf_evsel__syscall_arg_fmt(struct evsel *evsel, char *arg)
{
	struct tep_format_field *field;
	struct syscall_arg_fmt *fmt = evsel->priv;

	if (evsel->tp_format == NULL || fmt == NULL)
		return NULL;

	for (field = evsel->tp_format->format.fields; field; field = field->next, ++fmt)
		if (strcmp(field->name, arg) == 0)
			return fmt;

	return NULL;
}

static int trace__expand_filter(struct trace *trace __maybe_unused, struct evsel *evsel)
{
	char *tok, *left = evsel->filter, *new_filter = evsel->filter;

	while ((tok = strpbrk(left, "=<>!")) != NULL) {
		char *right = tok + 1, *right_end;

		if (*right == '=')
			++right;

		while (isspace(*right))
			++right;

		if (*right == '\0')
			break;

		while (!isalpha(*left))
			if (++left == tok) {
				/*
				 * Bail out, can't find the name of the argument that is being
				 * used in the filter, let it try to set this filter, will fail later.
				 */
				return 0;
			}

		right_end = right + 1;
		while (isalnum(*right_end) || *right_end == '_')
			++right_end;

		if (isalpha(*right)) {
			struct syscall_arg_fmt *fmt;
			int left_size = tok - left,
			    right_size = right_end - right;
			char arg[128];

			while (isspace(left[left_size - 1]))
				--left_size;

			scnprintf(arg, sizeof(arg), "%.*s", left_size, left);

			fmt = perf_evsel__syscall_arg_fmt(evsel, arg);
			if (fmt == NULL) {
				pr_debug("\"%s\" not found in \"%s\", can't set filter \"%s\"\n",
					 arg, evsel->name, evsel->filter);
				return -1;
			}

			pr_debug2("trying to expand \"%s\" \"%.*s\" \"%.*s\" -> ",
				  arg, (int)(right - tok), tok, right_size, right);

			if (fmt->strtoul) {
				u64 val;
				if (fmt->strtoul(right, right_size, NULL, &val)) {
					char *n, expansion[19];
					int expansion_length = scnprintf(expansion, sizeof(expansion), "%#" PRIx64, val);
					int expansion_offset = right - new_filter;

					pr_debug("%s", expansion);

					if (asprintf(&n, "%.*s%s%s", expansion_offset, new_filter, expansion, right_end) < 0) {
						pr_debug(" out of memory!\n");
						free(new_filter);
						return -1;
					}
					if (new_filter != evsel->filter)
						free(new_filter);
					left = n + expansion_offset + expansion_length;
					new_filter = n;
				} else {
					pr_err("\"%.*s\" not found for \"%s\" in \"%s\", can't set filter \"%s\"\n",
					       right_size, right, arg, evsel->name, evsel->filter);
					return -1;
				}
			} else {
				pr_err("No resolver (strtoul) for \"%s\" in \"%s\", can't set filter \"%s\"\n",
				       arg, evsel->name, evsel->filter);
				return -1;
			}

			pr_debug("\n");
		} else {
			left = right_end;
		}
	}

	if (new_filter != evsel->filter) {
		pr_debug("New filter for %s: %s\n", evsel->name, new_filter);
		perf_evsel__set_filter(evsel, new_filter);
		free(new_filter);
	}

	return 0;
}

static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel)
{
	struct evlist *evlist = trace->evlist;
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		if (trace__expand_filter(trace, evsel)) {
			*err_evsel = evsel;
			return -1;
		}
	}

	return 0;
}

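To make the expansion concrete: with -e msr:write_msr --filter='msr==IA32_TSC_DEADLINE', trace__expand_filter() looks up the "msr" argument's fmt, whose ->strtoul is the table-backed STUL_X86_MSR resolver, and splices the numeric value back into the filter string. A hedged fragment of that interaction, with names taken from the code above and the resolved value shown purely as an illustration:

	u64 val;
	const char *name = "IA32_TSC_DEADLINE";

	if (fmt->strtoul((char *)name, strlen(name), NULL, &val))
		pr_debug("msr==%#" PRIx64 "\n", val);	/* e.g. msr==0x6e0 */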
static int trace__run(struct trace *trace, int argc, const char **argv)
{
	struct evlist *evlist = trace->evlist;
@@ -3302,7 +3651,6 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
	    perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
				   trace__sched_stat_runtime))
		goto out_error_sched_stat_runtime;

	/*
	 * If a global cgroup was set, apply it to all the events without an
	 * explicit cgroup. I.e.:
@@ -3405,6 +3753,9 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
	 */
	trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));

	err = trace__expand_filters(trace, &evsel);
	if (err)
		goto out_delete_evlist;
	err = perf_evlist__apply_filters(evlist, &evsel);
	if (err < 0)
		goto out_error_apply_filters;
@@ -3450,17 +3801,17 @@ again:
		struct mmap *md;

		md = &evlist->mmap[i];
		if (perf_mmap__read_init(md) < 0)
		if (perf_mmap__read_init(&md->core) < 0)
			continue;

		while ((event = perf_mmap__read_event(md)) != NULL) {
		while ((event = perf_mmap__read_event(&md->core)) != NULL) {
			++trace->nr_events;

			err = trace__deliver_event(trace, event);
			if (err)
				goto out_disable;

			perf_mmap__consume(md);
			perf_mmap__consume(&md->core);

			if (interrupted)
				goto out_disable;
@@ -3470,7 +3821,7 @@ again:
					draining = true;
			}
		}
		perf_mmap__read_done(md);
		perf_mmap__read_done(&md->core);
	}

	if (trace->nr_events == before) {
@@ -3858,12 +4209,14 @@ static int parse_pagefaults(const struct option *opt, const char *str,
	return 0;
}

static void evlist__set_evsel_handler(struct evlist *evlist, void *handler)
static void evlist__set_default_evsel_handler(struct evlist *evlist, void *handler)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel->handler = handler;
	evlist__for_each_entry(evlist, evsel) {
		if (evsel->handler == NULL)
			evsel->handler = handler;
	}
}

static int evlist__set_syscall_tp_fields(struct evlist *evlist)
@@ -3874,8 +4227,10 @@ static int evlist__set_syscall_tp_fields(struct evlist *evlist)
		if (evsel->priv || !evsel->tp_format)
			continue;

		if (strcmp(evsel->tp_format->system, "syscalls"))
		if (strcmp(evsel->tp_format->system, "syscalls")) {
			perf_evsel__init_tp_arg_scnprintf(evsel);
			continue;
		}

		if (perf_evsel__init_syscall_tp(evsel))
			return -1;
@@ -4029,15 +4384,11 @@ static int trace__config(const char *var, const char *value, void *arg)
	int err = 0;

	if (!strcmp(var, "trace.add_events")) {
		struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
					       "event selector. use 'perf list' to list available events",
					       parse_events_option);
		/*
		 * We can't propagate parse_event_option() return, as it is 1
		 * for failure while perf_config() expects -1.
		 */
		if (parse_events_option(&o, value, 0))
			err = -1;
		trace->perfconfig_events = strdup(value);
		if (trace->perfconfig_events == NULL) {
			pr_err("Not enough memory for %s\n", "trace.add_events");
			return -1;
		}
	} else if (!strcmp(var, "trace.show_timestamp")) {
		trace->show_tstamp = perf_config_bool(var, value);
	} else if (!strcmp(var, "trace.show_duration")) {
@@ -4061,6 +4412,11 @@ static int trace__config(const char *var, const char *value, void *arg)
		int args_alignment = 0;
		if (perf_config_int(&args_alignment, var, value) == 0)
			trace->args_alignment = args_alignment;
	} else if (!strcmp(var, "trace.tracepoint_beautifiers")) {
		if (strcasecmp(value, "libtraceevent") == 0)
			trace->libtraceevent_print = true;
		else if (strcasecmp(value, "libbeauty") == 0)
			trace->libtraceevent_print = false;
	}
out:
	return err;
@@ -4103,6 +4459,8 @@ int cmd_trace(int argc, const char **argv)
	OPT_CALLBACK('e', "event", &trace, "event",
		     "event/syscall selector. use 'perf list' to list available events",
		     trace__parse_events_option),
	OPT_CALLBACK(0, "filter", &trace.evlist, "filter",
		     "event filter", parse_filter),
	OPT_BOOLEAN(0, "comm", &trace.show_comm,
		    "show the thread COMM next to its id"),
	OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
@@ -4150,6 +4508,8 @@ int cmd_trace(int argc, const char **argv)
	OPT_CALLBACK(0, "call-graph", &trace.opts,
		     "record_mode[,record_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print,
		    "Use libtraceevent to print the tracepoint arguments."),
	OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
		    "Show the kernel callchains on the syscall exit path"),
	OPT_ULONG(0, "max-events", &trace.max_events,
@@ -4210,6 +4570,37 @@ int cmd_trace(int argc, const char **argv)
	argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
					trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);

	/*
	 * Here we already passed thru trace__parse_events_option() and it has
	 * already figured out if -e syscall_name, if not but if --event
	 * foo:bar was used, the user is interested _just_ in those, say,
	 * tracepoint events, not in the strace-like syscall-name-based mode.
	 *
	 * This is important because we need to check if strace-like mode is
	 * needed to decide if we should filter out the eBPF
	 * __augmented_syscalls__ code, if it is in the mix, say, via
	 * .perfconfig trace.add_events, and filter those out.
	 */
	if (!trace.trace_syscalls && !trace.trace_pgfaults &&
	    trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
		trace.trace_syscalls = true;
	}
	/*
	 * Now that we have --verbose figured out, lets see if we need to parse
	 * events from .perfconfig, so that if those events fail parsing, say some
	 * BPF program fails, then we'll be able to use --verbose to see what went
	 * wrong in more detail.
	 */
	if (trace.perfconfig_events != NULL) {
		struct parse_events_error parse_err = { .idx = 0, };

		err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err);
		if (err) {
			parse_events_print_error(&parse_err, trace.perfconfig_events);
			goto out;
		}
	}

	if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
		usage_with_options_msg(trace_usage, trace_options,
				       "cgroup monitoring only available in system-wide mode");
@@ -4238,9 +4629,45 @@ int cmd_trace(int argc, const char **argv)

		trace.bpf_obj = evsel->bpf_obj;

		trace__set_bpf_map_filtered_pids(&trace);
		trace__set_bpf_map_syscalls(&trace);
		trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmented");
		/*
		 * If we have _just_ the augmenter event but don't have an
		 * explicit --syscalls, then assume we want all strace-like
		 * syscalls:
		 */
		if (!trace.trace_syscalls && trace__only_augmented_syscalls_evsels(&trace))
			trace.trace_syscalls = true;
		/*
		 * So, if we have a syscall augmenter, but trace_syscalls, aka
		 * strace-like syscall tracing is not set, then we need to throw
		 * away the augmenter, i.e. all the events that were created
		 * from that BPF object file.
		 *
		 * This is more to fix the current .perfconfig trace.add_events
		 * style of setting up the strace-like eBPF based syscall point
		 * payload augmenter.
		 *
		 * All this complexity will be avoided by adding an alternative
		 * to trace.add_events in the form of
		 * trace.bpf_augmented_syscalls, that will be only parsed if we
		 * need it.
		 *
		 * .perfconfig trace.add_events is still useful if we want, for
		 * instance, have msr_write.msr in some .perfconfig profile based
		 * 'perf trace --config determinism.profile' mode, where for some
		 * particular goal/workload type we want a set of events and
		 * output mode (with timings, etc) instead of having to add
		 * all via the command line.
		 *
		 * Also --config to specify an alternate .perfconfig file needs
		 * to be implemented.
		 */
		if (!trace.trace_syscalls) {
			trace__delete_augmented_syscalls(&trace);
		} else {
			trace__set_bpf_map_filtered_pids(&trace);
			trace__set_bpf_map_syscalls(&trace);
			trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmented");
		}
	}

	err = bpf__setup_stdout(trace.evlist);
@@ -4287,7 +4714,7 @@ int cmd_trace(int argc, const char **argv)
	}

	if (trace.evlist->core.nr_entries > 0) {
		evlist__set_evsel_handler(trace.evlist, trace__event_handler);
		evlist__set_default_evsel_handler(trace.evlist, trace__event_handler);
		if (evlist__set_syscall_tp_fields(trace.evlist)) {
			perror("failed to set syscalls:* tracepoint fields");
			goto out;
@@ -4383,11 +4810,6 @@ init_augmented_syscall_tp:
	if (trace.summary_only)
		trace.summary = trace.summary_only;

	if (!trace.trace_syscalls && !trace.trace_pgfaults &&
	    trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
		trace.trace_syscalls = true;
	}

	if (output_name != NULL) {
		err = trace__open_output(&trace, output_name);
		if (err < 0) {
@@ -4426,5 +4848,6 @@ out_close:
	if (output_name != NULL)
		fclose(trace.output);
out:
	zfree(&trace.perfconfig_events);
	return err;
}
@@ -28,6 +28,7 @@ arch/x86/include/asm/disabled-features.h
arch/x86/include/asm/required-features.h
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/inat_types.h
arch/x86/include/asm/msr-index.h
arch/x86/include/uapi/asm/prctl.h
arch/x86/lib/x86-opcode-map.txt
arch/x86/tools/gen-insn-attr-x86.awk
@@ -3,6 +3,7 @@ libperf-y += cpumap.o
libperf-y += threadmap.o
libperf-y += evsel.o
libperf-y += evlist.o
libperf-y += mmap.o
libperf-y += zalloc.o
libperf-y += xyarray.o
libperf-y += lib.o
@@ -172,8 +172,9 @@ install_headers:
		$(call do_install,include/perf/cpumap.h,$(prefix)/include/perf,644); \
		$(call do_install,include/perf/threadmap.h,$(prefix)/include/perf,644); \
		$(call do_install,include/perf/evlist.h,$(prefix)/include/perf,644); \
		$(call do_install,include/perf/evsel.h,$(prefix)/include/perf,644);
		$(call do_install,include/perf/event.h,$(prefix)/include/perf,644);
		$(call do_install,include/perf/evsel.h,$(prefix)/include/perf,644); \
		$(call do_install,include/perf/event.h,$(prefix)/include/perf,644); \
		$(call do_install,include/perf/mmap.h,$(prefix)/include/perf,644);

install_pkgconfig: $(LIBPERF_PC)
	$(call QUIET_INSTALL, $(LIBPERF_PC)) \
@@ -5,11 +5,12 @@
#include <stdio.h>
#include <stdarg.h>
#include <unistd.h>
#include <linux/compiler.h>
#include <perf/core.h>
#include <internal/lib.h>
#include "internal.h"

static int __base_pr(enum libperf_print_level level, const char *format,
static int __base_pr(enum libperf_print_level level __maybe_unused, const char *format,
		     va_list args)
{
	return vfprintf(stderr, format, args);
@ -8,13 +8,20 @@
|
||||
#include <internal/evlist.h>
|
||||
#include <internal/evsel.h>
|
||||
#include <internal/xyarray.h>
|
||||
#include <internal/mmap.h>
|
||||
#include <internal/cpumap.h>
|
||||
#include <internal/threadmap.h>
|
||||
#include <internal/xyarray.h>
|
||||
#include <internal/lib.h>
|
||||
#include <linux/zalloc.h>
|
||||
#include <sys/ioctl.h>
|
||||
#include <stdlib.h>
|
||||
#include <errno.h>
|
||||
#include <unistd.h>
|
||||
#include <fcntl.h>
|
||||
#include <signal.h>
|
||||
#include <poll.h>
|
||||
#include <sys/mman.h>
|
||||
#include <perf/cpumap.h>
|
||||
#include <perf/threadmap.h>
|
||||
#include <api/fd/array.h>
|
||||
@ -27,6 +34,7 @@ void perf_evlist__init(struct perf_evlist *evlist)
|
||||
INIT_HLIST_HEAD(&evlist->heads[i]);
|
||||
INIT_LIST_HEAD(&evlist->entries);
|
||||
evlist->nr_entries = 0;
|
||||
fdarray__init(&evlist->pollfd, 64);
|
||||
}
|
||||
|
||||
static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
|
||||
@@ -101,8 +109,36 @@ perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev)
 	return next;
 }
 
+static void perf_evlist__purge(struct perf_evlist *evlist)
+{
+	struct perf_evsel *pos, *n;
+
+	perf_evlist__for_each_entry_safe(evlist, n, pos) {
+		list_del_init(&pos->node);
+		perf_evsel__delete(pos);
+	}
+
+	evlist->nr_entries = 0;
+}
+
 void perf_evlist__exit(struct perf_evlist *evlist)
 {
 	perf_cpu_map__put(evlist->cpus);
 	perf_thread_map__put(evlist->threads);
+	evlist->cpus = NULL;
+	evlist->threads = NULL;
+	fdarray__exit(&evlist->pollfd);
 }
+
+void perf_evlist__delete(struct perf_evlist *evlist)
+{
+	if (evlist == NULL)
+		return;
+
+	perf_evlist__munmap(evlist);
+	perf_evlist__close(evlist);
+	perf_evlist__purge(evlist);
+	perf_evlist__exit(evlist);
+	free(evlist);
+}
@@ -277,7 +313,295 @@ int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
 	return pos;
 }
 
+static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
+					 void *arg __maybe_unused)
+{
+	struct perf_mmap *map = fda->priv[fd].ptr;
+
+	if (map)
+		perf_mmap__put(map);
+}
+
+int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
+{
+	return fdarray__filter(&evlist->pollfd, revents_and_mask,
+			       perf_evlist__munmap_filtered, NULL);
+}
+
+int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
+{
+	return fdarray__poll(&evlist->pollfd, timeout);
+}
+
+static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, bool overwrite)
+{
+	int i;
+	struct perf_mmap *map;
+
+	evlist->nr_mmaps = perf_cpu_map__nr(evlist->cpus);
+	if (perf_cpu_map__empty(evlist->cpus))
+		evlist->nr_mmaps = perf_thread_map__nr(evlist->threads);
+
+	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
+	if (!map)
+		return NULL;
+
+	for (i = 0; i < evlist->nr_mmaps; i++) {
+		/*
+		 * When the perf_mmap() call is made we grab one refcount, plus
+		 * one extra to let perf_mmap__consume() get the last
+		 * events after all real references (perf_mmap__get()) are
+		 * dropped.
+		 *
+		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
+		 * thus does perf_mmap__get() on it.
+		 */
+		perf_mmap__init(&map[i], overwrite, NULL);
+	}
+
+	return map;
+}
+
+static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
+				     struct perf_evsel *evsel, int idx, int cpu,
+				     int thread)
+{
+	struct perf_sample_id *sid = SID(evsel, cpu, thread);
+
+	sid->idx = idx;
+	if (evlist->cpus && cpu >= 0)
+		sid->cpu = evlist->cpus->map[cpu];
+	else
+		sid->cpu = -1;
+	if (!evsel->system_wide && evlist->threads && thread >= 0)
+		sid->tid = perf_thread_map__pid(evlist->threads, thread);
+	else
+		sid->tid = -1;
+}
+
+static struct perf_mmap*
+perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)
+{
+	struct perf_mmap *map = &evlist->mmap[idx];
+
+	if (overwrite) {
+		if (!evlist->mmap_ovw) {
+			evlist->mmap_ovw = perf_evlist__alloc_mmap(evlist, true);
+			if (!evlist->mmap_ovw)
+				return NULL;
+		}
+		map = &evlist->mmap_ovw[idx];
+	}
+
+	return map;
+}
+
+#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
+
+static int
+perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
+			  int output, int cpu)
+{
+	return perf_mmap__mmap(map, mp, output, cpu);
+}
+
+static int
+mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
+	       int idx, struct perf_mmap_param *mp, int cpu_idx,
+	       int thread, int *_output, int *_output_overwrite)
+{
+	int evlist_cpu = perf_cpu_map__cpu(evlist->cpus, cpu_idx);
+	struct perf_evsel *evsel;
+	int revent;
+
+	perf_evlist__for_each_entry(evlist, evsel) {
+		bool overwrite = evsel->attr.write_backward;
+		struct perf_mmap *map;
+		int *output, fd, cpu;
+
+		if (evsel->system_wide && thread)
+			continue;
+
+		cpu = perf_cpu_map__idx(evsel->cpus, evlist_cpu);
+		if (cpu == -1)
+			continue;
+
+		map = ops->get(evlist, overwrite, idx);
+		if (map == NULL)
+			return -ENOMEM;
+
+		if (overwrite) {
+			mp->prot = PROT_READ;
+			output   = _output_overwrite;
+		} else {
+			mp->prot = PROT_READ | PROT_WRITE;
+			output   = _output;
+		}
+
+		fd = FD(evsel, cpu, thread);
+
+		if (*output == -1) {
+			*output = fd;
+
+			/*
+			 * The last one will be done at perf_mmap__consume(), so that we
+			 * make sure we don't prevent tools from consuming every last event in
+			 * the ring buffer.
+			 *
+			 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
+			 * anymore, but the last events for it are still in the ring buffer,
+			 * waiting to be consumed.
+			 *
+			 * Tools can choose to ignore this at their own discretion, but the
+			 * evlist layer can't just drop it when filtering events in
+			 * perf_evlist__filter_pollfd().
+			 */
+			refcount_set(&map->refcnt, 2);
+
+			if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
+				return -1;
+		} else {
+			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
+				return -1;
+
+			perf_mmap__get(map);
+		}
+
+		revent = !overwrite ? POLLIN : 0;
+
+		if (!evsel->system_wide &&
+		    perf_evlist__add_pollfd(evlist, fd, map, revent) < 0) {
+			perf_mmap__put(map);
+			return -1;
+		}
+
+		if (evsel->attr.read_format & PERF_FORMAT_ID) {
+			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
+						   fd) < 0)
+				return -1;
+			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
+						 thread);
+		}
+	}
+
+	return 0;
+}
+
+static int
+mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
+		struct perf_mmap_param *mp)
+{
+	int thread;
+	int nr_threads = perf_thread_map__nr(evlist->threads);
+
+	for (thread = 0; thread < nr_threads; thread++) {
+		int output = -1;
+		int output_overwrite = -1;
+
+		if (ops->idx)
+			ops->idx(evlist, mp, thread, false);
+
+		if (mmap_per_evsel(evlist, ops, thread, mp, 0, thread,
+				   &output, &output_overwrite))
+			goto out_unmap;
+	}
+
+	return 0;
+
+out_unmap:
+	perf_evlist__munmap(evlist);
+	return -1;
+}
+
+static int
+mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
+	     struct perf_mmap_param *mp)
+{
+	int nr_threads = perf_thread_map__nr(evlist->threads);
+	int nr_cpus    = perf_cpu_map__nr(evlist->cpus);
+	int cpu, thread;
+
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		int output = -1;
+		int output_overwrite = -1;
+
+		if (ops->idx)
+			ops->idx(evlist, mp, cpu, true);
+
+		for (thread = 0; thread < nr_threads; thread++) {
+			if (mmap_per_evsel(evlist, ops, cpu, mp, cpu,
+					   thread, &output, &output_overwrite))
+				goto out_unmap;
+		}
+	}
+
+	return 0;
+
+out_unmap:
+	perf_evlist__munmap(evlist);
+	return -1;
+}
+
+int perf_evlist__mmap_ops(struct perf_evlist *evlist,
+			  struct perf_evlist_mmap_ops *ops,
+			  struct perf_mmap_param *mp)
+{
+	struct perf_evsel *evsel;
+	const struct perf_cpu_map *cpus = evlist->cpus;
+	const struct perf_thread_map *threads = evlist->threads;
+
+	if (!ops || !ops->get || !ops->mmap)
+		return -EINVAL;
+
+	if (!evlist->mmap)
+		evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
+	if (!evlist->mmap)
+		return -ENOMEM;
+
+	perf_evlist__for_each_entry(evlist, evsel) {
+		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+		    evsel->sample_id == NULL &&
+		    perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
+			return -ENOMEM;
+	}
+
+	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
+		return -ENOMEM;
+
+	if (perf_cpu_map__empty(cpus))
+		return mmap_per_thread(evlist, ops, mp);
+
+	return mmap_per_cpu(evlist, ops, mp);
+}
+
+int perf_evlist__mmap(struct perf_evlist *evlist, int pages)
+{
+	struct perf_mmap_param mp;
+	struct perf_evlist_mmap_ops ops = {
+		.get  = perf_evlist__mmap_cb_get,
+		.mmap = perf_evlist__mmap_cb_mmap,
+	};
+
+	evlist->mmap_len = (pages + 1) * page_size;
+	mp.mask = evlist->mmap_len - page_size - 1;
+
+	return perf_evlist__mmap_ops(evlist, &ops, &mp);
+}
+
+void perf_evlist__munmap(struct perf_evlist *evlist)
+{
+	int i;
+
+	if (evlist->mmap) {
+		for (i = 0; i < evlist->nr_mmaps; i++)
+			perf_mmap__munmap(&evlist->mmap[i]);
+	}
+
+	if (evlist->mmap_ovw) {
+		for (i = 0; i < evlist->nr_mmaps; i++)
+			perf_mmap__munmap(&evlist->mmap_ovw[i]);
+	}
+
+	zfree(&evlist->mmap);
+	zfree(&evlist->mmap_ovw);
+}
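The sizing in perf_evlist__mmap() above, mmap_len = (pages + 1) * page_size and mask = mmap_len - page_size - 1, describes one control-header page followed by a power-of-two data area; the mask round-trips back to the same total as mask + 1 + page_size. A quick worked check in C, with illustrative values (pages = 128 and page_size = 4096 are examples, not stated defaults):

	/* Illustrative check of the ring buffer sizing used above. */
	#include <assert.h>
	#include <stddef.h>

	int main(void)
	{
		size_t page_size = 4096, pages = 128;		/* example values */
		size_t mmap_len  = (pages + 1) * page_size;	/* 528384 bytes   */
		size_t mask      = mmap_len - page_size - 1;	/* 0x7ffff        */

		assert(mask + 1 + page_size == mmap_len);	/* mask round-trip */
		/* data size is a power of two, so offsets can wrap with '& mask' */
		assert(((mask + 1) & mask) == 0);
		return 0;
	}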
@@ -11,6 +11,7 @@
 
 struct perf_cpu_map;
 struct perf_thread_map;
+struct perf_mmap_param;
 
 struct perf_evlist {
 	struct list_head	 entries;
@@ -22,12 +23,33 @@ struct perf_evlist {
 	size_t			 mmap_len;
 	struct fdarray		 pollfd;
 	struct hlist_head	 heads[PERF_EVLIST__HLIST_SIZE];
+	struct perf_mmap	*mmap;
+	struct perf_mmap	*mmap_ovw;
 };
 
+typedef void
+(*perf_evlist_mmap__cb_idx_t)(struct perf_evlist*, struct perf_mmap_param*, int, bool);
+typedef struct perf_mmap*
+(*perf_evlist_mmap__cb_get_t)(struct perf_evlist*, bool, int);
+typedef int
+(*perf_evlist_mmap__cb_mmap_t)(struct perf_mmap*, struct perf_mmap_param*, int, int);
+
+struct perf_evlist_mmap_ops {
+	perf_evlist_mmap__cb_idx_t	idx;
+	perf_evlist_mmap__cb_get_t	get;
+	perf_evlist_mmap__cb_mmap_t	mmap;
+};
+
 int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
 int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
 			    void *ptr, short revent);
 
+int perf_evlist__mmap_ops(struct perf_evlist *evlist,
+			  struct perf_evlist_mmap_ops *ops,
+			  struct perf_mmap_param *mp);
+
+void perf_evlist__exit(struct perf_evlist *evlist);
+
 /**
  * __perf_evlist__for_each_entry - iterate thru all the evsels
  * @list: list_head instance to iterate
@@ -60,6 +82,24 @@ int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
 #define perf_evlist__for_each_entry_reverse(evlist, evsel) \
 	__perf_evlist__for_each_entry_reverse(&(evlist)->entries, evsel)
 
+/**
+ * __perf_evlist__for_each_entry_safe - safely iterate thru all the evsels
+ * @list: list_head instance to iterate
+ * @tmp: struct evsel temp iterator
+ * @evsel: struct evsel iterator
+ */
+#define __perf_evlist__for_each_entry_safe(list, tmp, evsel) \
+	list_for_each_entry_safe(evsel, tmp, list, node)
+
+/**
+ * perf_evlist__for_each_entry_safe - safely iterate thru all the evsels
+ * @evlist: evlist instance to iterate
+ * @evsel: struct evsel iterator
+ * @tmp: struct evsel temp iterator
+ */
+#define perf_evlist__for_each_entry_safe(evlist, tmp, evsel) \
+	__perf_evlist__for_each_entry_safe(&(evlist)->entries, tmp, evsel)
+
 static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist)
 {
 	return list_entry(evlist->entries.next, struct perf_evsel, node);
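The struct perf_evlist_mmap_ops introduced above splits the mmap loop into three policy points: idx (optional) tells the embedder which per-cpu or per-thread index is about to be mapped, get returns the perf_mmap slot to use, and mmap performs the actual mapping. A minimal sketch of an ops table, assuming preallocated map arrays (allocation not shown) and only the two callbacks that perf_evlist__mmap_ops() requires; this mirrors libperf's own defaults rather than adding any new API:

	#include <internal/evlist.h>
	#include <internal/mmap.h>
	#include <stdbool.h>

	static struct perf_mmap *
	my_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)
	{
		/* hand back the idx-th preallocated map */
		return overwrite ? &evlist->mmap_ovw[idx] : &evlist->mmap[idx];
	}

	static int
	my_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
		   int output, int cpu)
	{
		return perf_mmap__mmap(map, mp, output, cpu);
	}

	static struct perf_evlist_mmap_ops my_ops = {
		.get  = my_cb_get,
		.mmap = my_cb_mmap,
	};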
@@ -10,23 +10,45 @@
 /* perf sample has 16 bits size limit */
 #define PERF_SAMPLE_MAX_SIZE (1 << 16)
 
+struct perf_mmap;
+
+typedef void (*libperf_unmap_cb_t)(struct perf_mmap *map);
+
+/**
+ * struct perf_mmap - perf's ring buffer mmap details
+ *
+ * @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
+ */
 struct perf_mmap {
-	void		*base;
-	int		 mask;
-	int		 fd;
-	int		 cpu;
-	refcount_t	 refcnt;
-	u64		 prev;
-	u64		 start;
-	u64		 end;
-	bool		 overwrite;
-	u64		 flush;
-	char		 event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
+	void			*base;
+	int			 mask;
+	int			 fd;
+	int			 cpu;
+	refcount_t		 refcnt;
+	u64			 prev;
+	u64			 start;
+	u64			 end;
+	bool			 overwrite;
+	u64			 flush;
+	libperf_unmap_cb_t	 unmap_cb;
+	char			 event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
 };
 
 struct perf_mmap_param {
 	int	prot;
 	int	mask;
 };
 
+size_t perf_mmap__mmap_len(struct perf_mmap *map);
+
+void perf_mmap__init(struct perf_mmap *map, bool overwrite,
+		     libperf_unmap_cb_t unmap_cb);
+int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
+		    int fd, int cpu);
+void perf_mmap__munmap(struct perf_mmap *map);
+void perf_mmap__get(struct perf_mmap *map);
+void perf_mmap__put(struct perf_mmap *map);
+
 u64 perf_mmap__read_head(struct perf_mmap *map);
 
 #endif /* __LIBPERF_INTERNAL_MMAP_H */
@@ -12,6 +12,8 @@ enum libperf_print_level {
 	LIBPERF_WARN,
 	LIBPERF_INFO,
 	LIBPERF_DEBUG,
+	LIBPERF_DEBUG2,
+	LIBPERF_DEBUG3,
 };
 
 typedef int (*libperf_print_fn_t)(enum libperf_print_level level,
@@ -32,5 +32,10 @@ LIBPERF_API void perf_evlist__set_maps(struct perf_evlist *evlist,
 					 struct perf_cpu_map *cpus,
 					 struct perf_thread_map *threads);
 LIBPERF_API int perf_evlist__poll(struct perf_evlist *evlist, int timeout);
+LIBPERF_API int perf_evlist__filter_pollfd(struct perf_evlist *evlist,
+					   short revents_and_mask);
+
+LIBPERF_API int perf_evlist__mmap(struct perf_evlist *evlist, int pages);
+LIBPERF_API void perf_evlist__munmap(struct perf_evlist *evlist);
 
 #endif /* __LIBPERF_EVLIST_H */
diff --git a/tools/perf/lib/include/perf/mmap.h b/tools/perf/lib/include/perf/mmap.h
new file mode 100644
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LIBPERF_MMAP_H
+#define __LIBPERF_MMAP_H
+
+#include <perf/core.h>
+
+struct perf_mmap;
+union perf_event;
+
+LIBPERF_API void perf_mmap__consume(struct perf_mmap *map);
+LIBPERF_API int perf_mmap__read_init(struct perf_mmap *map);
+LIBPERF_API void perf_mmap__read_done(struct perf_mmap *map);
+LIBPERF_API union perf_event *perf_mmap__read_event(struct perf_mmap *map);
+
+#endif /* __LIBPERF_MMAP_H */
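The four calls exported here form one read cycle, matching the usage comment in the new mmap.c further down. A minimal consumer sketch, assuming an already mmap'ed struct perf_mmap *map and a hypothetical process_event() callback:

	#include <perf/mmap.h>
	#include <perf/event.h>

	static void drain(struct perf_mmap *map,
			  void (*process_event)(union perf_event *ev))
	{
		union perf_event *event;

		if (perf_mmap__read_init(map) < 0)
			return;	/* nothing new, or unmapped after POLLHUP/POLLERR */

		while ((event = perf_mmap__read_event(map)) != NULL) {
			process_event(event);
			perf_mmap__consume(map);	/* advance the tail */
		}

		perf_mmap__read_done(map);	/* mandatory for overwrite mode */
	}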
@@ -14,5 +14,7 @@ do {				\
 #define pr_warning(fmt, ...)	__pr(LIBPERF_WARN, fmt, ##__VA_ARGS__)
 #define pr_info(fmt, ...)	__pr(LIBPERF_INFO, fmt, ##__VA_ARGS__)
 #define pr_debug(fmt, ...)	__pr(LIBPERF_DEBUG, fmt, ##__VA_ARGS__)
+#define pr_debug2(fmt, ...)	__pr(LIBPERF_DEBUG2, fmt, ##__VA_ARGS__)
+#define pr_debug3(fmt, ...)	__pr(LIBPERF_DEBUG3, fmt, ##__VA_ARGS__)
 
 #endif /* __LIBPERF_INTERNAL_H */
@@ -40,6 +40,13 @@ LIBPERF_0.0.1 {
 		perf_evlist__next;
 		perf_evlist__set_maps;
 		perf_evlist__poll;
+		perf_evlist__mmap;
+		perf_evlist__munmap;
+		perf_evlist__filter_pollfd;
+		perf_mmap__consume;
+		perf_mmap__read_init;
+		perf_mmap__read_done;
+		perf_mmap__read_event;
 	local:
 		*;
 };
diff --git a/tools/perf/lib/mmap.c b/tools/perf/lib/mmap.c
new file mode 100644
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <sys/mman.h>
+#include <inttypes.h>
+#include <asm/bug.h>
+#include <errno.h>
+#include <string.h>
+#include <linux/ring_buffer.h>
+#include <linux/perf_event.h>
+#include <perf/mmap.h>
+#include <perf/event.h>
+#include <internal/mmap.h>
+#include <internal/lib.h>
+#include <linux/kernel.h>
+#include "internal.h"
+
+void perf_mmap__init(struct perf_mmap *map, bool overwrite,
+		     libperf_unmap_cb_t unmap_cb)
+{
+	map->fd = -1;
+	map->overwrite = overwrite;
+	map->unmap_cb  = unmap_cb;
+	refcount_set(&map->refcnt, 0);
+}
+
+size_t perf_mmap__mmap_len(struct perf_mmap *map)
+{
+	return map->mask + 1 + page_size;
+}
+
+int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
+		    int fd, int cpu)
+{
+	map->prev = 0;
+	map->mask = mp->mask;
+	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
+			 MAP_SHARED, fd, 0);
+	if (map->base == MAP_FAILED) {
+		map->base = NULL;
+		return -1;
+	}
+
+	map->fd  = fd;
+	map->cpu = cpu;
+	return 0;
+}
+
+void perf_mmap__munmap(struct perf_mmap *map)
+{
+	if (map && map->base != NULL) {
+		munmap(map->base, perf_mmap__mmap_len(map));
+		map->base = NULL;
+		map->fd = -1;
+		refcount_set(&map->refcnt, 0);
+	}
+	if (map && map->unmap_cb)
+		map->unmap_cb(map);
+}
+
+void perf_mmap__get(struct perf_mmap *map)
+{
+	refcount_inc(&map->refcnt);
+}
+
+void perf_mmap__put(struct perf_mmap *map)
+{
+	BUG_ON(map->base && refcount_read(&map->refcnt) == 0);
+
+	if (refcount_dec_and_test(&map->refcnt))
+		perf_mmap__munmap(map);
+}
+
+static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
+{
+	ring_buffer_write_tail(md->base, tail);
+}
+
+u64 perf_mmap__read_head(struct perf_mmap *map)
+{
+	return ring_buffer_read_head(map->base);
+}
+
+static bool perf_mmap__empty(struct perf_mmap *map)
+{
+	struct perf_event_mmap_page *pc = map->base;
+
+	return perf_mmap__read_head(map) == map->prev && !pc->aux_size;
+}
+
+void perf_mmap__consume(struct perf_mmap *map)
+{
+	if (!map->overwrite) {
+		u64 old = map->prev;
+
+		perf_mmap__write_tail(map, old);
+	}
+
+	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
+		perf_mmap__put(map);
+}
+
+static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
+{
+	struct perf_event_header *pheader;
+	u64 evt_head = *start;
+	int size = mask + 1;
+
+	pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);
+	pheader = (struct perf_event_header *)(buf + (*start & mask));
+	while (true) {
+		if (evt_head - *start >= (unsigned int)size) {
+			pr_debug("Finished reading overwrite ring buffer: rewind\n");
+			if (evt_head - *start > (unsigned int)size)
+				evt_head -= pheader->size;
+			*end = evt_head;
+			return 0;
+		}
+
+		pheader = (struct perf_event_header *)(buf + (evt_head & mask));
+
+		if (pheader->size == 0) {
+			pr_debug("Finished reading overwrite ring buffer: get start\n");
+			*end = evt_head;
+			return 0;
+		}
+
+		evt_head += pheader->size;
+		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
+	}
+	WARN_ONCE(1, "Shouldn't get here\n");
+	return -1;
+}
+
+/*
+ * Report the start and end of the available data in ringbuffer
+ */
+static int __perf_mmap__read_init(struct perf_mmap *md)
+{
+	u64 head = perf_mmap__read_head(md);
+	u64 old = md->prev;
+	unsigned char *data = md->base + page_size;
+	unsigned long size;
+
+	md->start = md->overwrite ? head : old;
+	md->end = md->overwrite ? old : head;
+
+	if ((md->end - md->start) < md->flush)
+		return -EAGAIN;
+
+	size = md->end - md->start;
+	if (size > (unsigned long)(md->mask) + 1) {
+		if (!md->overwrite) {
+			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
+
+			md->prev = head;
+			perf_mmap__consume(md);
+			return -EAGAIN;
+		}
+
+		/*
+		 * Backward ring buffer is full. We still have a chance to read
+		 * most of data from it.
+		 */
+		if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+int perf_mmap__read_init(struct perf_mmap *map)
+{
+	/*
+	 * Check if event was unmapped due to a POLLHUP/POLLERR.
+	 */
+	if (!refcount_read(&map->refcnt))
+		return -ENOENT;
+
+	return __perf_mmap__read_init(map);
+}
+
+/*
+ * Mandatory for overwrite mode
+ * The direction of overwrite mode is backward.
+ * The last perf_mmap__read() will set tail to map->prev.
+ * Need to correct the map->prev to head which is the end of next read.
+ */
+void perf_mmap__read_done(struct perf_mmap *map)
+{
+	/*
+	 * Check if event was unmapped due to a POLLHUP/POLLERR.
+	 */
+	if (!refcount_read(&map->refcnt))
+		return;
+
+	map->prev = perf_mmap__read_head(map);
+}
+
+/* When check_messup is true, 'end' must point to a good entry */
+static union perf_event *perf_mmap__read(struct perf_mmap *map,
+					 u64 *startp, u64 end)
+{
+	unsigned char *data = map->base + page_size;
+	union perf_event *event = NULL;
+	int diff = end - *startp;
+
+	if (diff >= (int)sizeof(event->header)) {
+		size_t size;
+
+		event = (union perf_event *)&data[*startp & map->mask];
+		size = event->header.size;
+
+		if (size < sizeof(event->header) || diff < (int)size)
+			return NULL;
+
+		/*
+		 * Event straddles the mmap boundary -- header should always
+		 * be inside due to u64 alignment of output.
+		 */
+		if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
+			unsigned int offset = *startp;
+			unsigned int len = min(sizeof(*event), size), cpy;
+			void *dst = map->event_copy;
+
+			do {
+				cpy = min(map->mask + 1 - (offset & map->mask), len);
+				memcpy(dst, &data[offset & map->mask], cpy);
+				offset += cpy;
+				dst += cpy;
+				len -= cpy;
+			} while (len);
+
+			event = (union perf_event *)map->event_copy;
+		}
+
+		*startp += size;
+	}
+
+	return event;
+}
+
+/*
+ * Read event from ring buffer one by one.
+ * Return one event for each call.
+ *
+ * Usage:
+ * perf_mmap__read_init()
+ * while(event = perf_mmap__read_event()) {
+ *	//process the event
+ *	perf_mmap__consume()
+ * }
+ * perf_mmap__read_done()
+ */
+union perf_event *perf_mmap__read_event(struct perf_mmap *map)
+{
+	union perf_event *event;
+
+	/*
+	 * Check if event was unmapped due to a POLLHUP/POLLERR.
+	 */
+	if (!refcount_read(&map->refcnt))
+		return NULL;
+
+	/* non-overwrite doesn't pause the ringbuffer */
+	if (!map->overwrite)
+		map->end = perf_mmap__read_head(map);
+
+	event = perf_mmap__read(map, &map->start, map->end);
+
+	if (!map->overwrite)
+		map->prev = map->start;
+
+	return event;
+}
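The subtle case in perf_mmap__read() above is an event that straddles the end of the data area: it is reassembled piecewise into event_copy with '& mask' arithmetic. A standalone toy of that wrap-around copy (the 8-byte ring and 4-byte record are illustrative sizes only):

	#include <assert.h>
	#include <string.h>

	int main(void)
	{
		unsigned char data[9] = "ABCDEFGH";	/* 8-byte data area, mask = 7 */
		unsigned int mask = 7, offset = 6, len = 4, cpy;
		unsigned char out[4], *dst = out;

		do {	/* copy 4 bytes starting at offset 6: wraps after "GH" */
			cpy = mask + 1 - (offset & mask);
			if (cpy > len)
				cpy = len;
			memcpy(dst, &data[offset & mask], cpy);
			offset += cpy;
			dst    += cpy;
			len    -= cpy;
		} while (len);

		assert(memcmp(out, "GHAB", 4) == 0);	/* wrapped copy */
		return 0;
	}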
@@ -15,7 +15,9 @@ void test_attr__init(void);
 void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
 		     int fd, int group_fd, unsigned long flags);
 
-#define HAVE_ATTR_TEST
+#ifndef HAVE_ATTR_TEST
+#define HAVE_ATTR_TEST 1
+#endif
 
 static inline int
 sys_perf_event_open(struct perf_event_attr *attr,
@@ -27,7 +29,7 @@ sys_perf_event_open(struct perf_event_attr *attr,
 	fd = syscall(__NR_perf_event_open, attr, pid, cpu,
 		     group_fd, flags);
 
-#ifdef HAVE_ATTR_TEST
+#if HAVE_ATTR_TEST
 	if (unlikely(test_attr__enabled))
 		test_attr__open(attr, pid, cpu, fd, group_fd, flags);
 #endif
[diff for one file suppressed by the viewer: too large to display]
@@ -13,6 +13,7 @@
 #include "util/mmap.h"
 #include <errno.h>
 #include <linux/string.h>
+#include <perf/mmap.h>
 
 #define NR_ITERS 111
 
@@ -37,8 +38,8 @@ static int count_samples(struct evlist *evlist, int *sample_count,
 		struct mmap *map = &evlist->overwrite_mmap[i];
 		union perf_event *event;
 
-		perf_mmap__read_init(map);
-		while ((event = perf_mmap__read_event(map)) != NULL) {
+		perf_mmap__read_init(&map->core);
+		while ((event = perf_mmap__read_event(&map->core)) != NULL) {
 			const u32 type = event->header.type;
 
 			switch (type) {
@@ -53,7 +54,7 @@ static int count_samples(struct evlist *evlist, int *sample_count,
 				return TEST_FAIL;
 			}
 		}
-		perf_mmap__read_done(map);
+		perf_mmap__read_done(&map->core);
 	}
 	return TEST_OK;
 }
@@ -15,6 +15,7 @@
 #include <linux/string.h>
 #include <api/fs/fs.h>
 #include <bpf/bpf.h>
+#include <perf/mmap.h>
 #include "tests.h"
 #include "llvm.h"
 #include "debug.h"
@@ -184,16 +185,16 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
 		struct mmap *md;
 
 		md = &evlist->mmap[i];
-		if (perf_mmap__read_init(md) < 0)
+		if (perf_mmap__read_init(&md->core) < 0)
 			continue;
 
-		while ((event = perf_mmap__read_event(md)) != NULL) {
+		while ((event = perf_mmap__read_event(&md->core)) != NULL) {
 			const u32 type = event->header.type;
 
 			if (type == PERF_RECORD_SAMPLE)
 				count ++;
 		}
-		perf_mmap__read_done(md);
+		perf_mmap__read_done(&md->core);
 	}
 
 	if (count != expect) {
@@ -10,6 +10,7 @@
 #include <sys/param.h>
 #include <perf/cpumap.h>
 #include <perf/evlist.h>
+#include <perf/mmap.h>
 
 #include "debug.h"
 #include "dso.h"
@@ -425,16 +426,16 @@ static int process_events(struct machine *machine, struct evlist *evlist,
 
 	for (i = 0; i < evlist->core.nr_mmaps; i++) {
 		md = &evlist->mmap[i];
-		if (perf_mmap__read_init(md) < 0)
+		if (perf_mmap__read_init(&md->core) < 0)
 			continue;
 
-		while ((event = perf_mmap__read_event(md)) != NULL) {
+		while ((event = perf_mmap__read_event(&md->core)) != NULL) {
 			ret = process_event(machine, evlist, event, state);
-			perf_mmap__consume(md);
+			perf_mmap__consume(&md->core);
 			if (ret < 0)
 				return ret;
 		}
-		perf_mmap__read_done(md);
+		perf_mmap__read_done(&md->core);
 	}
 	return 0;
 }
@@ -5,6 +5,7 @@
 #include <sys/prctl.h>
 #include <perf/cpumap.h>
 #include <perf/evlist.h>
+#include <perf/mmap.h>
 
 #include "debug.h"
 #include "parse-events.h"
@@ -38,17 +39,17 @@ static int find_comm(struct evlist *evlist, const char *comm)
 	found = 0;
 	for (i = 0; i < evlist->core.nr_mmaps; i++) {
 		md = &evlist->mmap[i];
-		if (perf_mmap__read_init(md) < 0)
+		if (perf_mmap__read_init(&md->core) < 0)
 			continue;
-		while ((event = perf_mmap__read_event(md)) != NULL) {
+		while ((event = perf_mmap__read_event(&md->core)) != NULL) {
 			if (event->header.type == PERF_RECORD_COMM &&
 			    (pid_t)event->comm.pid == getpid() &&
 			    (pid_t)event->comm.tid == getpid() &&
 			    strcmp(event->comm.comm, comm) == 0)
 				found += 1;
-			perf_mmap__consume(md);
+			perf_mmap__consume(&md->core);
 		}
-		perf_mmap__read_done(md);
+		perf_mmap__read_done(&md->core);
 	}
 	return found;
 }
@@ -16,6 +16,7 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <perf/evlist.h>
+#include <perf/mmap.h>
 
 /*
  * This test will generate random numbers of calls to some getpid syscalls,
@@ -113,10 +114,10 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
 	}
 
 	md = &evlist->mmap[0];
-	if (perf_mmap__read_init(md) < 0)
+	if (perf_mmap__read_init(&md->core) < 0)
 		goto out_init;
 
-	while ((event = perf_mmap__read_event(md)) != NULL) {
+	while ((event = perf_mmap__read_event(&md->core)) != NULL) {
 		struct perf_sample sample;
 
 		if (event->header.type != PERF_RECORD_SAMPLE) {
@@ -139,9 +140,9 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
 			goto out_delete_evlist;
 		}
 		nr_events[evsel->idx]++;
-		perf_mmap__consume(md);
+		perf_mmap__consume(&md->core);
 	}
-	perf_mmap__read_done(md);
+	perf_mmap__read_done(&md->core);
 
 out_init:
 	err = 0;
@@ -13,6 +13,7 @@
 #include "debug.h"
 #include "util/mmap.h"
 #include <errno.h>
+#include <perf/mmap.h>
 
 #ifndef O_DIRECTORY
 #define O_DIRECTORY	00200000
@@ -92,10 +93,10 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
 			struct mmap *md;
 
 			md = &evlist->mmap[i];
-			if (perf_mmap__read_init(md) < 0)
+			if (perf_mmap__read_init(&md->core) < 0)
 				continue;
 
-			while ((event = perf_mmap__read_event(md)) != NULL) {
+			while ((event = perf_mmap__read_event(&md->core)) != NULL) {
 				const u32 type = event->header.type;
 				int tp_flags;
 				struct perf_sample sample;
@@ -103,7 +104,7 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
 				++nr_events;
 
 				if (type != PERF_RECORD_SAMPLE) {
-					perf_mmap__consume(md);
+					perf_mmap__consume(&md->core);
 					continue;
 				}
 
@@ -123,7 +124,7 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
 
 			goto out_ok;
 		}
-		perf_mmap__read_done(md);
+		perf_mmap__read_done(&md->core);
 	}
 
 	if (nr_events == before)
@@ -6,6 +6,7 @@
 #include <pthread.h>
 
 #include <sched.h>
+#include <perf/mmap.h>
 #include "evlist.h"
 #include "evsel.h"
 #include "debug.h"
@@ -170,10 +171,10 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
 			struct mmap *md;
 
 			md = &evlist->mmap[i];
-			if (perf_mmap__read_init(md) < 0)
+			if (perf_mmap__read_init(&md->core) < 0)
 				continue;
 
-			while ((event = perf_mmap__read_event(md)) != NULL) {
+			while ((event = perf_mmap__read_event(&md->core)) != NULL) {
 				const u32 type = event->header.type;
 				const char *name = perf_event__name(type);
 
@@ -276,9 +277,9 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
 					++errs;
 				}
 
-				perf_mmap__consume(md);
+				perf_mmap__consume(&md->core);
 			}
-			perf_mmap__read_done(md);
+			perf_mmap__read_done(&md->core);
 		}
 
 		/*
@@ -15,6 +15,7 @@
 #include "util/mmap.h"
 #include "util/thread_map.h"
 #include <perf/evlist.h>
+#include <perf/mmap.h>
 
 #define NR_LOOPS  10000000
 
@@ -99,10 +100,10 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
 	evlist__disable(evlist);
 
 	md = &evlist->mmap[0];
-	if (perf_mmap__read_init(md) < 0)
+	if (perf_mmap__read_init(&md->core) < 0)
 		goto out_init;
 
-	while ((event = perf_mmap__read_event(md)) != NULL) {
+	while ((event = perf_mmap__read_event(&md->core)) != NULL) {
 		struct perf_sample sample;
 
 		if (event->header.type != PERF_RECORD_SAMPLE)
@@ -117,9 +118,9 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
 		total_periods += sample.period;
 		nr_samples++;
 next_event:
-		perf_mmap__consume(md);
+		perf_mmap__consume(&md->core);
 	}
-	perf_mmap__read_done(md);
+	perf_mmap__read_done(&md->core);
 
 out_init:
 	if ((u64) nr_samples == total_periods) {
@@ -8,6 +8,7 @@
 #include <linux/zalloc.h>
 #include <perf/cpumap.h>
 #include <perf/evlist.h>
+#include <perf/mmap.h>
 
 #include "debug.h"
 #include "parse-events.h"
@@ -269,17 +270,17 @@ static int process_events(struct evlist *evlist,
 
 	for (i = 0; i < evlist->core.nr_mmaps; i++) {
 		md = &evlist->mmap[i];
-		if (perf_mmap__read_init(md) < 0)
+		if (perf_mmap__read_init(&md->core) < 0)
 			continue;
 
-		while ((event = perf_mmap__read_event(md)) != NULL) {
+		while ((event = perf_mmap__read_event(&md->core)) != NULL) {
 			cnt += 1;
 			ret = add_event(evlist, &events, event);
-			perf_mmap__consume(md);
+			perf_mmap__consume(&md->core);
 			if (ret < 0)
 				goto out_free_nodes;
 		}
-		perf_mmap__read_done(md);
+		perf_mmap__read_done(&md->core);
 	}
 
 	events_array = calloc(cnt, sizeof(struct event_node));
@@ -12,6 +12,7 @@
 #include <linux/string.h>
 #include <perf/cpumap.h>
 #include <perf/evlist.h>
+#include <perf/mmap.h>
 
 static int exited;
 static int nr_exit;
@@ -117,16 +118,16 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
 
 retry:
 	md = &evlist->mmap[0];
-	if (perf_mmap__read_init(md) < 0)
+	if (perf_mmap__read_init(&md->core) < 0)
 		goto out_init;
 
-	while ((event = perf_mmap__read_event(md)) != NULL) {
+	while ((event = perf_mmap__read_event(&md->core)) != NULL) {
 		if (event->header.type == PERF_RECORD_EXIT)
 			nr_exit++;
 
-		perf_mmap__consume(md);
+		perf_mmap__consume(&md->core);
 	}
-	perf_mmap__read_done(md);
+	perf_mmap__read_done(&md->core);
 
 out_init:
 	if (!exited || !nr_exit) {
@@ -17,3 +17,4 @@ perf-y += sockaddr.o
 perf-y += socket.o
 perf-y += statx.o
 perf-y += sync_file_range.o
+perf-y += tracepoints/
@@ -5,9 +5,10 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <sys/types.h>
 #include <stdbool.h>
 
 struct strarray {
-	int	    offset;
+	u64	    offset;
 	int	    nr_entries;
 	const char *prefix;
 	const char **entries;
@@ -29,6 +30,8 @@ struct strarray {
 size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val);
 size_t strarray__scnprintf_flags(struct strarray *sa, char *bf, size_t size, bool show_prefix, unsigned long flags);
 
+bool strarray__strtoul(struct strarray *sa, char *bf, size_t size, u64 *ret);
+
 struct trace;
 struct thread;
 
@@ -51,6 +54,8 @@ struct strarrays {
 
 size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val);
 
+bool strarrays__strtoul(struct strarrays *sas, char *bf, size_t size, u64 *ret);
+
 size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size);
 
 extern struct strarray strarray__socket_families;
@@ -78,6 +83,8 @@ struct augmented_arg {
 	u64  value[];
 };
 
+struct syscall_arg_fmt;
+
 /**
  * @val: value of syscall argument being formatted
  * @args: All the args, use syscall_args__val(arg, nth) to access one
@@ -94,6 +101,7 @@ struct augmented_arg {
 struct syscall_arg {
 	unsigned long val;
 	unsigned char *args;
+	struct syscall_arg_fmt *fmt;
 	struct {
 		struct augmented_arg *args;
 		int   size;
@@ -111,6 +119,12 @@ unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx);
 size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg);
 #define SCA_STRARRAY_FLAGS syscall_arg__scnprintf_strarray_flags
 
+size_t syscall_arg__scnprintf_x86_MSR(char *bf, size_t size, struct syscall_arg *arg);
+#define SCA_X86_MSR syscall_arg__scnprintf_x86_MSR
+
+bool syscall_arg__strtoul_x86_MSR(char *bf, size_t size, struct syscall_arg *arg, u64 *ret);
+#define STUL_X86_MSR syscall_arg__strtoul_x86_MSR
+
 size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size, struct syscall_arg *arg);
 #define SCA_STRARRAYS syscall_arg__scnprintf_strarrays
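The new strtoul hooks run the scnprintf tables in reverse: rather than mapping an integer to a name, they scan the entries for a name and recover offset + index, the integer the in-kernel filter needs. A freestanding sketch of that kind of lookup (struct and helper names here are hypothetical, not perf's implementation):

	#include <stdbool.h>
	#include <string.h>

	struct str_table {
		unsigned long long offset;	/* value of entries[0] */
		int                nr_entries;
		const char        *prefix;	/* e.g. "MSR_"         */
		const char       **entries;
	};

	static bool str_table__strtoul(struct str_table *t, const char *bf,
				       unsigned long long *ret)
	{
		size_t plen = strlen(t->prefix);
		int i;

		if (strncmp(bf, t->prefix, plen) == 0)
			bf += plen;	/* accept "TSC" and "MSR_TSC" alike */

		for (i = 0; i < t->nr_entries; ++i) {
			if (t->entries[i] && strcmp(t->entries[i], bf) == 0) {
				*ret = t->offset + i;
				return true;
			}
		}
		return false;
	}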
diff --git a/tools/perf/trace/beauty/tracepoints/Build b/tools/perf/trace/beauty/tracepoints/Build
new file mode 100644
@@ -0,0 +1 @@
+perf-y += x86_msr.o
diff --git a/tools/perf/trace/beauty/tracepoints/x86_msr.c b/tools/perf/trace/beauty/tracepoints/x86_msr.c
new file mode 100644
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: LGPL-2.1
+/*
+ * trace/beauty/x86_msr.c
+ *
+ *  Copyright (C) 2019, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ */
+
+#include "trace/beauty/beauty.h"
+
+#include "trace/beauty/generated/x86_arch_MSRs_array.c"
+
+static DEFINE_STRARRAY(x86_MSRs, "MSR_");
+static DEFINE_STRARRAY_OFFSET(x86_64_specific_MSRs, "MSR_", x86_64_specific_MSRs_offset);
+static DEFINE_STRARRAY_OFFSET(x86_AMD_V_KVM_MSRs, "MSR_", x86_AMD_V_KVM_MSRs_offset);
+
+static struct strarray *x86_MSRs_tables[] = {
+	&strarray__x86_MSRs,
+	&strarray__x86_64_specific_MSRs,
+	&strarray__x86_AMD_V_KVM_MSRs,
+};
+
+static DEFINE_STRARRAYS(x86_MSRs_tables);
+
+static size_t x86_MSR__scnprintf(unsigned long msr, char *bf, size_t size, bool show_prefix)
+{
+	return strarrays__scnprintf(&strarrays__x86_MSRs_tables, bf, size, "%#x", show_prefix, msr);
+}
+
+size_t syscall_arg__scnprintf_x86_MSR(char *bf, size_t size, struct syscall_arg *arg)
+{
+	unsigned long flags = arg->val;
+
+	return x86_MSR__scnprintf(flags, bf, size, arg->show_string_prefix);
+}
+
+bool syscall_arg__strtoul_x86_MSR(char *bf, size_t size, struct syscall_arg *arg __maybe_unused, u64 *ret)
+{
+	return strarrays__strtoul(&strarrays__x86_MSRs_tables, bf, size, ret);
+}
diff --git a/tools/perf/trace/beauty/tracepoints/x86_msr.sh b/tools/perf/trace/beauty/tracepoints/x86_msr.sh
new file mode 100755
@@ -0,0 +1,40 @@
+#!/bin/sh
+# SPDX-License-Identifier: LGPL-2.1
+
+if [ $# -ne 1 ] ; then
+	arch_x86_header_dir=tools/arch/x86/include/asm/
+else
+	arch_x86_header_dir=$1
+fi
+
+x86_msr_index=${arch_x86_header_dir}/msr-index.h
+
+# Support all later, with some hash table, for now chop off
+# just the ones starting with 0x00000 so as to have a simple
+# array.
+
+printf "static const char *x86_MSRs[] = {\n"
+regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MSR_([[:alnum:]][[:alnum:]_]+)[[:space:]]+(0x00000[[:xdigit:]]+)[[:space:]]*.*'
+egrep $regex ${x86_msr_index} | egrep -v 'MSR_(ATOM|P[46]|AMD64|IA32_TSCDEADLINE|IDT_FCR4)' | \
+	sed -r "s/$regex/\2 \1/g" | sort -n | \
+	xargs printf "\t[%s] = \"%s\",\n"
+printf "};\n\n"
+
+# Remove MSR_K6_WHCR, clashes with MSR_LSTAR
+regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MSR_([[:alnum:]][[:alnum:]_]+)[[:space:]]+(0xc0000[[:xdigit:]]+)[[:space:]]*.*'
+printf "#define x86_64_specific_MSRs_offset "
+egrep $regex ${x86_msr_index} | sed -r "s/$regex/\2/g" | sort -n | head -1
+printf "static const char *x86_64_specific_MSRs[] = {\n"
+egrep $regex ${x86_msr_index} | \
+	sed -r "s/$regex/\2 \1/g" | egrep -vw 'K6_WHCR' | sort -n | \
+	xargs printf "\t[%s - x86_64_specific_MSRs_offset] = \"%s\",\n"
+printf "};\n\n"
+
+regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MSR_([[:alnum:]][[:alnum:]_]+)[[:space:]]+(0xc0010[[:xdigit:]]+)[[:space:]]*.*'
+printf "#define x86_AMD_V_KVM_MSRs_offset "
+egrep $regex ${x86_msr_index} | sed -r "s/$regex/\2/g" | sort -n | head -1
+printf "static const char *x86_AMD_V_KVM_MSRs[] = {\n"
+egrep $regex ${x86_msr_index} | \
+	sed -r "s/$regex/\2 \1/g" | sort -n | \
+	xargs printf "\t[%s - x86_AMD_V_KVM_MSRs_offset] = \"%s\",\n"
+printf "};\n"
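The script emits three C string arrays indexed by MSR number, the two high ranges rebased by a per-table offset so the arrays stay dense. The generated x86_arch_MSRs_array.c plausibly contains fragments like the following (entries picked from well-known msr-index.h values; the real file is produced at build time):

	static const char *x86_MSRs[] = {
		[0x00000010] = "IA32_TSC",
		[0x0000001b] = "IA32_APICBASE",
		[0x000000cd] = "FSB_FREQ",
	};

	#define x86_64_specific_MSRs_offset 0xc0000080
	static const char *x86_64_specific_MSRs[] = {
		[0xc0000080 - x86_64_specific_MSRs_offset] = "EFER",
		[0xc0000081 - x86_64_specific_MSRs_offset] = "STAR",
		[0xc0000082 - x86_64_specific_MSRs_offset] = "LSTAR",
	};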
@@ -95,6 +95,7 @@ perf-y += cloexec.o
 perf-y += call-path.o
 perf-y += rwsem.o
 perf-y += thread-stack.o
+perf-y += spark.o
 perf-$(CONFIG_AUXTRACE) += auxtrace.o
 perf-$(CONFIG_AUXTRACE) += intel-pt-decoder/
 perf-$(CONFIG_AUXTRACE) += intel-pt.o
@@ -853,6 +853,10 @@ static int __symbol__account_cycles(struct cyc_hist *ch,
 		    ch[offset].start < start)
 			return 0;
 	}
+
+	if (ch[offset].num < NUM_SPARKS)
+		ch[offset].cycles_spark[ch[offset].num] = cycles;
+
 	ch[offset].have_start = have_start;
 	ch[offset].start = start;
 	ch[offset].cycles += cycles;
@@ -11,6 +11,7 @@
 #include <pthread.h>
 #include <asm/bug.h>
 #include "symbol_conf.h"
+#include "spark.h"
 
 struct hist_browser_timer;
 struct hist_entry;
@@ -235,6 +236,7 @@ struct cyc_hist {
 	u64	cycles_aggr;
 	u64	cycles_max;
 	u64	cycles_min;
+	s64	cycles_spark[NUM_SPARKS];
 	u32	num;
 	u32	num_aggr;
 	u8	have_start;
@@ -2,6 +2,7 @@
 #include "cpumap.h"
 #include "debug.h"
 #include "env.h"
+#include "util/header.h"
 #include <linux/ctype.h>
 #include <linux/zalloc.h>
 #include "bpf-event.h"
@@ -256,6 +257,21 @@ int perf_env__read_cpu_topology_map(struct perf_env *env)
 	return 0;
 }
 
+int perf_env__read_cpuid(struct perf_env *env)
+{
+	char cpuid[128];
+	int err = get_cpuid(cpuid, sizeof(cpuid));
+
+	if (err)
+		return err;
+
+	free(env->cpuid);
+	env->cpuid = strdup(cpuid);
+	if (env->cpuid == NULL)
+		return ENOMEM;
+	return 0;
+}
+
 static int perf_env__read_arch(struct perf_env *env)
 {
 	struct utsname uts;
@@ -104,6 +104,7 @@ void perf_env__exit(struct perf_env *env);
 
 int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]);
 
+int perf_env__read_cpuid(struct perf_env *env);
 int perf_env__read_cpu_topology_map(struct perf_env *env);
 
 void cpu_cache_level__free(struct cpu_cache_level *cache);
@@ -42,6 +42,7 @@
 #include <perf/evlist.h>
 #include <perf/evsel.h>
 #include <perf/cpumap.h>
+#include <perf/mmap.h>
 
 #include <internal/xyarray.h>
 
@@ -57,7 +58,6 @@ void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
 {
 	perf_evlist__init(&evlist->core);
 	perf_evlist__set_maps(&evlist->core, cpus, threads);
-	fdarray__init(&evlist->core.pollfd, 64);
 	evlist->workload.pid = -1;
 	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
 }
@@ -138,7 +138,7 @@ void evlist__exit(struct evlist *evlist)
 {
 	zfree(&evlist->mmap);
 	zfree(&evlist->overwrite_mmap);
-	fdarray__exit(&evlist->core.pollfd);
+	perf_evlist__exit(&evlist->core);
 }
 
 void evlist__delete(struct evlist *evlist)
@@ -148,10 +148,6 @@ void evlist__delete(struct evlist *evlist)
 
 	evlist__munmap(evlist);
 	evlist__close(evlist);
-	perf_cpu_map__put(evlist->core.cpus);
-	perf_thread_map__put(evlist->core.threads);
-	evlist->core.cpus = NULL;
-	evlist->core.threads = NULL;
 	evlist__purge(evlist);
 	evlist__exit(evlist);
 	free(evlist);
@@ -186,6 +182,30 @@ void perf_evlist__splice_list_tail(struct evlist *evlist,
 	}
 }
 
+int __evlist__set_tracepoints_handlers(struct evlist *evlist,
+				       const struct evsel_str_handler *assocs, size_t nr_assocs)
+{
+	struct evsel *evsel;
+	size_t i;
+	int err;
+
+	for (i = 0; i < nr_assocs; i++) {
+		// Adding a handler for an event not in this evlist, just ignore it.
+		evsel = perf_evlist__find_tracepoint_by_name(evlist, assocs[i].name);
+		if (evsel == NULL)
+			continue;
+
+		err = -EEXIST;
+		if (evsel->handler != NULL)
+			goto out;
+		evsel->handler = assocs[i].handler;
+	}
+
+	err = 0;
+out:
+	return err;
+}
+
 void __perf_evlist__set_leader(struct list_head *list)
 {
 	struct evsel *evsel, *leader;
@@ -403,19 +423,9 @@ int evlist__add_pollfd(struct evlist *evlist, int fd)
 	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN);
 }
 
-static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
-					 void *arg __maybe_unused)
-{
-	struct mmap *map = fda->priv[fd].ptr;
-
-	if (map)
-		perf_mmap__put(map);
-}
-
 int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
 {
-	return fdarray__filter(&evlist->core.pollfd, revents_and_mask,
-			       perf_evlist__munmap_filtered, NULL);
+	return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);
 }
 
 int evlist__poll(struct evlist *evlist, int timeout)
@@ -423,22 +433,6 @@ int evlist__poll(struct evlist *evlist, int timeout)
 	return perf_evlist__poll(&evlist->core, timeout);
 }
 
-static void perf_evlist__set_sid_idx(struct evlist *evlist,
-				     struct evsel *evsel, int idx, int cpu,
-				     int thread)
-{
-	struct perf_sample_id *sid = SID(evsel, cpu, thread);
-	sid->idx = idx;
-	if (evlist->core.cpus && cpu >= 0)
-		sid->cpu = evlist->core.cpus->map[cpu];
-	else
-		sid->cpu = -1;
-	if (!evsel->core.system_wide && evlist->core.threads && thread >= 0)
-		sid->tid = perf_thread_map__pid(evlist->core.threads, thread);
-	else
-		sid->tid = -1;
-}
-
 struct perf_sample_id *perf_evlist__id2sid(struct evlist *evlist, u64 id)
 {
 	struct hlist_head *head;
@@ -577,11 +571,11 @@ static void evlist__munmap_nofree(struct evlist *evlist)
 
 	if (evlist->mmap)
 		for (i = 0; i < evlist->core.nr_mmaps; i++)
-			perf_mmap__munmap(&evlist->mmap[i]);
+			perf_mmap__munmap(&evlist->mmap[i].core);
 
 	if (evlist->overwrite_mmap)
 		for (i = 0; i < evlist->core.nr_mmaps; i++)
-			perf_mmap__munmap(&evlist->overwrite_mmap[i]);
+			perf_mmap__munmap(&evlist->overwrite_mmap[i].core);
 }
 
 void evlist__munmap(struct evlist *evlist)
@@ -591,6 +585,13 @@ void evlist__munmap(struct evlist *evlist)
 	zfree(&evlist->overwrite_mmap);
 }
 
+static void perf_mmap__unmap_cb(struct perf_mmap *map)
+{
+	struct mmap *m = container_of(map, struct mmap, core);
+
+	mmap__munmap(m);
+}
+
 static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
 				       bool overwrite)
 {
@@ -605,8 +606,6 @@ static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
 		return NULL;
 
 	for (i = 0; i < evlist->core.nr_mmaps; i++) {
-		map[i].core.fd = -1;
-		map[i].core.overwrite = overwrite;
 		/*
 		 * When the perf_mmap() call is made we grab one refcount, plus
@@ -616,151 +615,54 @@ static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
 		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
 		 * thus does perf_mmap__get() on it.
 		 */
-		refcount_set(&map[i].core.refcnt, 0);
+		perf_mmap__init(&map[i].core, overwrite, perf_mmap__unmap_cb);
 	}
 
 	return map;
 }
 
-static bool
-perf_evlist__should_poll(struct evlist *evlist __maybe_unused,
-			 struct evsel *evsel)
+static void
+perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
+			 struct perf_mmap_param *_mp,
+			 int idx, bool per_cpu)
 {
-	if (evsel->core.attr.write_backward)
-		return false;
-	return true;
+	struct evlist *evlist = container_of(_evlist, struct evlist, core);
+	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
+
+	auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, idx, per_cpu);
 }
 
-static int evlist__mmap_per_evsel(struct evlist *evlist, int idx,
-				  struct mmap_params *mp, int cpu_idx,
-				  int thread, int *_output, int *_output_overwrite)
+static struct perf_mmap*
+perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
 {
-	struct evsel *evsel;
-	int revent;
-	int evlist_cpu = cpu_map__cpu(evlist->core.cpus, cpu_idx);
+	struct evlist *evlist = container_of(_evlist, struct evlist, core);
+	struct mmap *maps = evlist->mmap;
 
-	evlist__for_each_entry(evlist, evsel) {
-		struct mmap *maps = evlist->mmap;
-		int *output = _output;
-		int fd;
-		int cpu;
+	if (overwrite) {
+		maps = evlist->overwrite_mmap;
 
-		mp->prot = PROT_READ | PROT_WRITE;
-		if (evsel->core.attr.write_backward) {
-			output = _output_overwrite;
-			maps = evlist->overwrite_mmap;
+		if (!maps) {
+			maps = evlist__alloc_mmap(evlist, true);
+			if (!maps)
+				return NULL;
 
-			if (!maps) {
-				maps = evlist__alloc_mmap(evlist, true);
-				if (!maps)
-					return -1;
-				evlist->overwrite_mmap = maps;
-				if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
-					perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
-			}
-			mp->prot &= ~PROT_WRITE;
-		}
-
-		if (evsel->core.system_wide && thread)
-			continue;
-
-		cpu = perf_cpu_map__idx(evsel->core.cpus, evlist_cpu);
-		if (cpu == -1)
-			continue;
-
-		fd = FD(evsel, cpu, thread);
-
-		if (*output == -1) {
-			*output = fd;
-
-			if (perf_mmap__mmap(&maps[idx], mp, *output, evlist_cpu) < 0)
-				return -1;
-		} else {
-			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
-				return -1;
-
-			perf_mmap__get(&maps[idx]);
-		}
-
-		revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;
-
-		/*
-		 * The system_wide flag causes a selected event to be opened
-		 * always without a pid.  Consequently it will never get a
-		 * POLLHUP, but it is used for tracking in combination with
-		 * other events, so it should not need to be polled anyway.
-		 * Therefore don't add it for polling.
-		 */
-		if (!evsel->core.system_wide &&
-		    perf_evlist__add_pollfd(&evlist->core, fd, &maps[idx], revent) < 0) {
-			perf_mmap__put(&maps[idx]);
-			return -1;
-		}
-
-		if (evsel->core.attr.read_format & PERF_FORMAT_ID) {
-			if (perf_evlist__id_add_fd(&evlist->core, &evsel->core, cpu, thread,
-						   fd) < 0)
-				return -1;
-			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
-						 thread);
+			evlist->overwrite_mmap = maps;
+			if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
+				perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
 		}
 	}
 
-	return 0;
+	return &maps[idx].core;
 }
 
-static int evlist__mmap_per_cpu(struct evlist *evlist,
-				struct mmap_params *mp)
+static int
+perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp,
+			  int output, int cpu)
 {
-	int cpu, thread;
-	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
-	int nr_threads = perf_thread_map__nr(evlist->core.threads);
+	struct mmap *map = container_of(_map, struct mmap, core);
+	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
 
-	pr_debug2("perf event ring buffer mmapped per cpu\n");
-	for (cpu = 0; cpu < nr_cpus; cpu++) {
-		int output = -1;
-		int output_overwrite = -1;
-
-		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
-					      true);
-
-		for (thread = 0; thread < nr_threads; thread++) {
-			if (evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
-						   thread, &output, &output_overwrite))
-				goto out_unmap;
-		}
-	}
-
-	return 0;
-
-out_unmap:
-	evlist__munmap_nofree(evlist);
-	return -1;
-}
-
-static int evlist__mmap_per_thread(struct evlist *evlist,
-				   struct mmap_params *mp)
-{
-	int thread;
-	int nr_threads = perf_thread_map__nr(evlist->core.threads);
-
-	pr_debug2("perf event ring buffer mmapped per thread\n");
-	for (thread = 0; thread < nr_threads; thread++) {
-		int output = -1;
-		int output_overwrite = -1;
-
-		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
-					      false);
-
-		if (evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
-					   &output, &output_overwrite))
-			goto out_unmap;
-	}
-
-	return 0;
-
-out_unmap:
-	evlist__munmap_nofree(evlist);
-	return -1;
+	return mmap__mmap(map, mp, output, cpu);
 }
 
 unsigned long perf_event_mlock_kb_in_pages(void)
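The bridge between the tool's struct mmap and libperf's struct perf_mmap in the callbacks above is plain struct embedding: only the core member crosses the API boundary, and container_of() recovers the outer object on the way back. The pattern in miniature (struct tool_mmap is a stand-in, not a perf type):

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct core_mmap { int fd; };		/* library-side state */

	struct tool_mmap {
		struct core_mmap core;		/* must stay embedded */
		int              tool_private;	/* tool-side extras   */
	};

	static void callback(struct core_mmap *core)
	{
		/* the library hands us core; recover the outer struct */
		struct tool_mmap *m = container_of(core, struct tool_mmap, core);

		m->tool_private++;
	}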
@@ -890,43 +792,36 @@ int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
 			 bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
 			 int comp_level)
 {
-	struct evsel *evsel;
-	const struct perf_cpu_map *cpus = evlist->core.cpus;
-	const struct perf_thread_map *threads = evlist->core.threads;
 	/*
 	 * Delay setting mp.prot: set it before calling perf_mmap__mmap.
 	 * Its value is decided by evsel's write_backward.
 	 * So &mp should not be passed through const pointer.
 	 */
-	struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity, .flush = flush,
-				  .comp_level = comp_level };
+	struct mmap_params mp = {
+		.nr_cblocks	= nr_cblocks,
+		.affinity	= affinity,
+		.flush		= flush,
+		.comp_level	= comp_level
+	};
+	struct perf_evlist_mmap_ops ops = {
+		.idx  = perf_evlist__mmap_cb_idx,
+		.get  = perf_evlist__mmap_cb_get,
+		.mmap = perf_evlist__mmap_cb_mmap,
+	};
 
 	if (!evlist->mmap)
 		evlist->mmap = evlist__alloc_mmap(evlist, false);
 	if (!evlist->mmap)
 		return -ENOMEM;
 
-	if (evlist->core.pollfd.entries == NULL && perf_evlist__alloc_pollfd(&evlist->core) < 0)
-		return -ENOMEM;
-
 	evlist->core.mmap_len = evlist__mmap_size(pages);
 	pr_debug("mmap size %zuB\n", evlist->core.mmap_len);
-	mp.mask = evlist->core.mmap_len - page_size - 1;
+	mp.core.mask = evlist->core.mmap_len - page_size - 1;
 
 	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
 				   auxtrace_pages, auxtrace_overwrite);
 
-	evlist__for_each_entry(evlist, evsel) {
-		if ((evsel->core.attr.read_format & PERF_FORMAT_ID) &&
-		    evsel->core.sample_id == NULL &&
-		    perf_evsel__alloc_id(&evsel->core, perf_cpu_map__nr(cpus), threads->nr) < 0)
-			return -ENOMEM;
-	}
-
-	if (perf_cpu_map__empty(cpus))
-		return evlist__mmap_per_thread(evlist, &mp);
-
-	return evlist__mmap_per_cpu(evlist, &mp);
+	return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core);
 }
 
 int evlist__mmap(struct evlist *evlist, unsigned int pages)
@@ -1029,6 +924,9 @@ int perf_evlist__set_tp_filter(struct evlist *evlist, const char *filter)
 	struct evsel *evsel;
 	int err = 0;
 
+	if (filter == NULL)
+		return -1;
+
 	evlist__for_each_entry(evlist, evsel) {
 		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
 			continue;
@ -1041,16 +939,35 @@ int perf_evlist__set_tp_filter(struct evlist *evlist, const char *filter)
|
||||
return err;
|
||||
}
|
||||
|
||||
int perf_evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
|
||||
int perf_evlist__append_tp_filter(struct evlist *evlist, const char *filter)
|
||||
{
|
||||
struct evsel *evsel;
|
||||
int err = 0;
|
||||
|
||||
if (filter == NULL)
|
||||
return -1;
|
||||
|
||||
evlist__for_each_entry(evlist, evsel) {
|
||||
if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
|
||||
continue;
|
||||
|
||||
err = perf_evsel__append_tp_filter(evsel, filter);
|
||||
if (err)
|
||||
break;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static char *asprintf__tp_filter_pids(size_t npids, pid_t *pids)
|
||||
{
|
||||
char *filter;
|
||||
int ret = -1;
|
||||
size_t i;
|
||||
|
||||
for (i = 0; i < npids; ++i) {
|
||||
if (i == 0) {
|
||||
if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
|
||||
return -1;
|
||||
return NULL;
|
||||
} else {
|
||||
char *tmp;
|
||||
|
||||
@ -1062,8 +979,17 @@ int perf_evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *
|
||||
}
|
||||
}
|
||||
|
||||
ret = perf_evlist__set_tp_filter(evlist, filter);
|
||||
return filter;
|
||||
out_free:
|
||||
free(filter);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int perf_evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
|
||||
{
|
||||
char *filter = asprintf__tp_filter_pids(npids, pids);
|
||||
int ret = perf_evlist__set_tp_filter(evlist, filter);
|
||||
|
||||
free(filter);
|
||||
return ret;
|
||||
}
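For reference, a sketch of what asprintf__tp_filter_pids() ends up building, assuming the else-branch elided by the hunk above joins the terms with " && ":

	char *filter = asprintf__tp_filter_pids(3, (pid_t[]){ 10, 20, 30 });
	/* filter == "common_pid != 10 && common_pid != 20 && common_pid != 30",
	 * i.e. exclude those threads from the tracepoint events */
	free(filter);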
@@ -1073,6 +999,20 @@ int perf_evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
	return perf_evlist__set_tp_filter_pids(evlist, 1, &pid);
}

+int perf_evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
+{
+	char *filter = asprintf__tp_filter_pids(npids, pids);
+	int ret = perf_evlist__append_tp_filter(evlist, filter);
+
+	free(filter);
+	return ret;
+}
+
+int perf_evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid)
+{
+	return perf_evlist__append_tp_filter_pids(evlist, 1, &pid);
+}
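A hedged usage sketch of the new append variants, in the spirit of how 'perf trace' can now stack a user-supplied --filter on top of its own pid exclusions (setup_filters() and its arguments are hypothetical):

	static int setup_filters(struct evlist *evlist, const char *user_filter,
				 size_t nr_pids, pid_t *pids)
	{
		int err = perf_evlist__append_tp_filter_pids(evlist, nr_pids, pids);

		if (err < 0)
			return err;
		/* append preserves the pid terms installed above, where
		 * perf_evlist__set_tp_filter() would overwrite them */
		return user_filter ? perf_evlist__append_tp_filter(evlist, user_filter) : 0;
	}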

bool perf_evlist__valid_sample_type(struct evlist *evlist)
{
	struct evsel *pos;
@@ -1729,9 +1669,9 @@ static void *perf_evlist__poll_thread(void *arg)
		struct mmap *map = &evlist->mmap[i];
		union perf_event *event;

-		if (perf_mmap__read_init(map))
+		if (perf_mmap__read_init(&map->core))
			continue;
-		while ((event = perf_mmap__read_event(map)) != NULL) {
+		while ((event = perf_mmap__read_event(&map->core)) != NULL) {
			struct evsel *evsel = perf_evlist__event2evsel(evlist, event);

			if (evsel && evsel->side_band.cb)
@@ -1739,10 +1679,10 @@ static void *perf_evlist__poll_thread(void *arg)
			else
				pr_warning("cannot locate proper evsel for the side band event\n");

-			perf_mmap__consume(map);
+			perf_mmap__consume(&map->core);
			got_data = true;
		}
-		perf_mmap__read_done(map);
+		perf_mmap__read_done(&map->core);
	}

	if (draining && !got_data)

tools/perf/util/evlist.h
@@ -118,6 +118,13 @@ void perf_evlist__stop_sb_thread(struct evlist *evlist);
int perf_evlist__add_newtp(struct evlist *evlist,
			   const char *sys, const char *name, void *handler);

+int __evlist__set_tracepoints_handlers(struct evlist *evlist,
+				       const struct evsel_str_handler *assocs,
+				       size_t nr_assocs);
+
+#define evlist__set_tracepoints_handlers(evlist, array) \
+	__evlist__set_tracepoints_handlers(evlist, array, ARRAY_SIZE(array))
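A usage sketch for the new evlist-level macro; the sched tracepoints are real, the handler functions are hypothetical stand-ins:

	static int handle_sched_switch(void) { return 0; }	/* hypothetical */
	static int handle_sched_wakeup(void) { return 0; }	/* hypothetical */

	static const struct evsel_str_handler handlers[] = {
		{ "sched:sched_switch", handle_sched_switch },
		{ "sched:sched_wakeup", handle_sched_wakeup },
	};

	/* associates each named tracepoint in the evlist with its handler:
	 * err = evlist__set_tracepoints_handlers(evlist, handlers); */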

void __perf_evlist__set_sample_bit(struct evlist *evlist,
				   enum perf_event_sample_format bit);
void __perf_evlist__reset_sample_bit(struct evlist *evlist,
@@ -133,6 +140,11 @@ int perf_evlist__set_tp_filter(struct evlist *evlist, const char *filter);
int perf_evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid);
int perf_evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids);

+int perf_evlist__append_tp_filter(struct evlist *evlist, const char *filter);
+
+int perf_evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid);
+int perf_evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids);
+
struct evsel *
perf_evlist__find_tracepoint_by_id(struct evlist *evlist, int id);

tools/perf/util/mmap.c
@@ -13,6 +13,7 @@
#include <stdlib.h>
#include <string.h>
#include <unistd.h> // sysconf()
+#include <perf/mmap.h>
#ifdef HAVE_LIBNUMA_SUPPORT
#include <numaif.h>
#endif
@@ -23,116 +24,9 @@
#include "../perf.h"
#include <internal/lib.h> /* page_size */

-size_t perf_mmap__mmap_len(struct mmap *map)
+size_t mmap__mmap_len(struct mmap *map)
{
-	return map->core.mask + 1 + page_size;
+	return perf_mmap__mmap_len(&map->core);
}
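The arithmetic now delegated to libperf is unchanged: a power-of-two data area of mask + 1 bytes plus one leading control page for the ring-buffer header. A standalone sketch of the same computation:

	#include <stddef.h>

	static size_t ring_len(size_t data_pages, size_t page_size)
	{
		size_t mask = data_pages * page_size - 1;	/* data_pages must be a power of two */

		return mask + 1 + page_size;	/* data area plus the control page */
	}
	/* ring_len(8, 4096) == 36864: eight data pages and one header page */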

-/* When check_messup is true, 'end' must point to a good entry */
-static union perf_event *perf_mmap__read(struct mmap *map,
-					 u64 *startp, u64 end)
-{
-	unsigned char *data = map->core.base + page_size;
-	union perf_event *event = NULL;
-	int diff = end - *startp;
-
-	if (diff >= (int)sizeof(event->header)) {
-		size_t size;
-
-		event = (union perf_event *)&data[*startp & map->core.mask];
-		size = event->header.size;
-
-		if (size < sizeof(event->header) || diff < (int)size)
-			return NULL;
-
-		/*
-		 * Event straddles the mmap boundary -- header should always
-		 * be inside due to u64 alignment of output.
-		 */
-		if ((*startp & map->core.mask) + size != ((*startp + size) & map->core.mask)) {
-			unsigned int offset = *startp;
-			unsigned int len = min(sizeof(*event), size), cpy;
-			void *dst = map->core.event_copy;
-
-			do {
-				cpy = min(map->core.mask + 1 - (offset & map->core.mask), len);
-				memcpy(dst, &data[offset & map->core.mask], cpy);
-				offset += cpy;
-				dst += cpy;
-				len -= cpy;
-			} while (len);
-
-			event = (union perf_event *)map->core.event_copy;
-		}
-
-		*startp += size;
-	}
-
-	return event;
-}
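The do/while above linearizes an event that straddles the end of the mapping into event_copy. A standalone sketch of that wrap-around copy, under the same power-of-two mask assumption:

	#include <string.h>

	static void ring_copy(unsigned char *dst, const unsigned char *data,
			      unsigned long mask, unsigned long start, size_t size)
	{
		size_t copied = 0;

		while (copied < size) {
			/* bytes left before the physical end of the ring */
			size_t chunk = mask + 1 - (start & mask);

			if (chunk > size - copied)
				chunk = size - copied;
			memcpy(dst + copied, &data[start & mask], chunk);
			start += chunk;
			copied += chunk;
		}
	}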
-
-/*
- * Read events from the ring buffer one by one.
- * Return one event for each call.
- *
- * Usage:
- * perf_mmap__read_init()
- * while(event = perf_mmap__read_event()) {
- *	//process the event
- *	perf_mmap__consume()
- * }
- * perf_mmap__read_done()
- */
-union perf_event *perf_mmap__read_event(struct mmap *map)
-{
-	union perf_event *event;
-
-	/*
-	 * Check if event was unmapped due to a POLLHUP/POLLERR.
-	 */
-	if (!refcount_read(&map->core.refcnt))
-		return NULL;
-
-	/* non-overwrite doesn't pause the ringbuffer */
-	if (!map->core.overwrite)
-		map->core.end = perf_mmap__read_head(map);
-
-	event = perf_mmap__read(map, &map->core.start, map->core.end);
-
-	if (!map->core.overwrite)
-		map->core.prev = map->core.start;
-
-	return event;
-}
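Following the usage comment above, a minimal consumer loop against the pre-libperf signatures shown in this hunk (process_event() is a hypothetical callback, error handling elided):

	static void drain_ring(struct mmap *map,
			       void (*process_event)(union perf_event *event))
	{
		union perf_event *event;

		if (perf_mmap__read_init(map))
			return;		/* nothing to read, or already unmapped */
		while ((event = perf_mmap__read_event(map)) != NULL) {
			process_event(event);
			perf_mmap__consume(map);	/* hand the slot back to the kernel */
		}
		perf_mmap__read_done(map);
	}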
-
-static bool perf_mmap__empty(struct mmap *map)
-{
-	return perf_mmap__read_head(map) == map->core.prev && !map->auxtrace_mmap.base;
-}
-
-void perf_mmap__get(struct mmap *map)
-{
-	refcount_inc(&map->core.refcnt);
-}
-
-void perf_mmap__put(struct mmap *map)
-{
-	BUG_ON(map->core.base && refcount_read(&map->core.refcnt) == 0);
-
-	if (refcount_dec_and_test(&map->core.refcnt))
-		perf_mmap__munmap(map);
-}
-
-void perf_mmap__consume(struct mmap *map)
-{
-	if (!map->core.overwrite) {
-		u64 old = map->core.prev;
-
-		perf_mmap__write_tail(map, old);
-	}
-
-	if (refcount_read(&map->core.refcnt) == 1 && perf_mmap__empty(map))
-		perf_mmap__put(map);
-}

int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
@@ -170,7 +64,7 @@ static int perf_mmap__aio_enabled(struct mmap *map)
#ifdef HAVE_LIBNUMA_SUPPORT
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
-	map->aio.data[idx] = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
+	map->aio.data[idx] = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				  MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
	if (map->aio.data[idx] == MAP_FAILED) {
		map->aio.data[idx] = NULL;
@@ -183,7 +77,7 @@ static int perf_mmap__aio_alloc(struct mmap *map, int idx)
static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	if (map->aio.data[idx]) {
-		munmap(map->aio.data[idx], perf_mmap__mmap_len(map));
+		munmap(map->aio.data[idx], mmap__mmap_len(map));
		map->aio.data[idx] = NULL;
	}
}
@@ -196,7 +90,7 @@ static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)

	if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
		data = map->aio.data[idx];
-		mmap_len = perf_mmap__mmap_len(map);
+		mmap_len = mmap__mmap_len(map);
		node_mask = 1UL << cpu__get_node(cpu);
		if (mbind(data, mmap_len, MPOL_BIND, &node_mask, 1, 0)) {
			pr_err("Failed to bind [%p-%p] AIO buffer to node %d: error %m\n",
@@ -210,7 +104,7 @@ static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
#else /* !HAVE_LIBNUMA_SUPPORT */
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
-	map->aio.data[idx] = malloc(perf_mmap__mmap_len(map));
+	map->aio.data[idx] = malloc(mmap__mmap_len(map));
	if (map->aio.data[idx] == NULL)
		return -1;

@@ -311,19 +205,13 @@ static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
}
#endif

-void perf_mmap__munmap(struct mmap *map)
+void mmap__munmap(struct mmap *map)
{
	perf_mmap__aio_munmap(map);
	if (map->data != NULL) {
-		munmap(map->data, perf_mmap__mmap_len(map));
+		munmap(map->data, mmap__mmap_len(map));
		map->data = NULL;
	}
	if (map->core.base != NULL) {
		munmap(map->core.base, perf_mmap__mmap_len(map));
		map->core.base = NULL;
		map->core.fd = -1;
		refcount_set(&map->core.refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

@@ -353,34 +241,13 @@ static void perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
		CPU_SET(map->core.cpu, &map->affinity_mask);
}

-int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
+int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
{
-	/*
-	 * The last one will be done at perf_mmap__consume(), so that we
-	 * make sure we don't prevent tools from consuming every last event in
-	 * the ring buffer.
-	 *
-	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
-	 * anymore, but the last events for it are still in the ring buffer,
-	 * waiting to be consumed.
-	 *
-	 * Tools can choose to ignore this at their own discretion, but the
-	 * evlist layer can't just drop it when filtering events in
-	 * perf_evlist__filter_pollfd().
-	 */
-	refcount_set(&map->core.refcnt, 2);
-	map->core.prev = 0;
-	map->core.mask = mp->mask;
-	map->core.base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
-			      MAP_SHARED, fd, 0);
-	if (map->core.base == MAP_FAILED) {
+	if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
-		map->core.base = NULL;
		return -1;
	}
-	map->core.fd = fd;
-	map->core.cpu = cpu;

	perf_mmap__setup_affinity_mask(map, mp);

@@ -389,7 +256,7 @@ int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
	map->comp_level = mp->comp_level;

	if (map->comp_level && !perf_mmap__aio_enabled(map)) {
-		map->data = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
+		map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				 MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
		if (map->data == MAP_FAILED) {
			pr_debug2("failed to mmap data buffer, error %d\n",
@@ -406,96 +273,16 @@ int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
	return perf_mmap__aio_mmap(map, mp);
}

-static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
-{
-	struct perf_event_header *pheader;
-	u64 evt_head = *start;
-	int size = mask + 1;
-
-	pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);
-	pheader = (struct perf_event_header *)(buf + (*start & mask));
-	while (true) {
-		if (evt_head - *start >= (unsigned int)size) {
-			pr_debug("Finished reading overwrite ring buffer: rewind\n");
-			if (evt_head - *start > (unsigned int)size)
-				evt_head -= pheader->size;
-			*end = evt_head;
-			return 0;
-		}
-
-		pheader = (struct perf_event_header *)(buf + (evt_head & mask));
-
-		if (pheader->size == 0) {
-			pr_debug("Finished reading overwrite ring buffer: get start\n");
-			*end = evt_head;
-			return 0;
-		}
-
-		evt_head += pheader->size;
-		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
-	}
-	WARN_ONCE(1, "Shouldn't get here\n");
-	return -1;
-}
-
-/*
- * Report the start and end of the available data in the ringbuffer.
- */
-static int __perf_mmap__read_init(struct mmap *md)
-{
-	u64 head = perf_mmap__read_head(md);
-	u64 old = md->core.prev;
-	unsigned char *data = md->core.base + page_size;
-	unsigned long size;
-
-	md->core.start = md->core.overwrite ? head : old;
-	md->core.end = md->core.overwrite ? old : head;
-
-	if ((md->core.end - md->core.start) < md->core.flush)
-		return -EAGAIN;
-
-	size = md->core.end - md->core.start;
-	if (size > (unsigned long)(md->core.mask) + 1) {
-		if (!md->core.overwrite) {
-			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
-
-			md->core.prev = head;
-			perf_mmap__consume(md);
-			return -EAGAIN;
-		}
-
-		/*
-		 * Backward ring buffer is full. We still have a chance to read
-		 * most of the data from it.
-		 */
-		if (overwrite_rb_find_range(data, md->core.mask, &md->core.start, &md->core.end))
-			return -EINVAL;
-	}
-
-	return 0;
-}
-
-int perf_mmap__read_init(struct mmap *map)
-{
-	/*
-	 * Check if event was unmapped due to a POLLHUP/POLLERR.
-	 */
-	if (!refcount_read(&map->core.refcnt))
-		return -ENOENT;
-
-	return __perf_mmap__read_init(map);
-}

int perf_mmap__push(struct mmap *md, void *to,
		    int push(struct mmap *map, void *to, void *buf, size_t size))
{
-	u64 head = perf_mmap__read_head(md);
+	u64 head = perf_mmap__read_head(&md->core);
	unsigned char *data = md->core.base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

-	rc = perf_mmap__read_init(md);
+	rc = perf_mmap__read_init(&md->core);
	if (rc < 0)
		return (rc == -EAGAIN) ? 1 : -1;

@@ -522,24 +309,7 @@ int perf_mmap__push(struct mmap *md, void *to,
	}

	md->core.prev = head;
-	perf_mmap__consume(md);
+	perf_mmap__consume(&md->core);
out:
	return rc;
}

-/*
- * Mandatory for overwrite mode.
- * The direction of overwrite mode is backward.
- * The last perf_mmap__read() will set tail to map->core.prev.
- * Need to correct map->core.prev to head, which is the end of the next read.
- */
-void perf_mmap__read_done(struct mmap *map)
-{
-	/*
-	 * Check if event was unmapped due to a POLLHUP/POLLERR.
-	 */
-	if (!refcount_read(&map->core.refcnt))
-		return;
-
-	map->core.prev = perf_mmap__read_head(map);
-}
tools/perf/util/mmap.h
@@ -37,37 +37,19 @@ struct mmap {
};

struct mmap_params {
-	int prot, mask, nr_cblocks, affinity, flush, comp_level;
+	struct perf_mmap_param core;
+	int nr_cblocks, affinity, flush, comp_level;
	struct auxtrace_mmap_params auxtrace_mp;
};

-int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu);
-void perf_mmap__munmap(struct mmap *map);
-
-void perf_mmap__get(struct mmap *map);
-void perf_mmap__put(struct mmap *map);
-
-void perf_mmap__consume(struct mmap *map);
-
-static inline u64 perf_mmap__read_head(struct mmap *mm)
-{
-	return ring_buffer_read_head(mm->core.base);
-}
-
-static inline void perf_mmap__write_tail(struct mmap *md, u64 tail)
-{
-	ring_buffer_write_tail(md->core.base, tail);
-}
+int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu);
+void mmap__munmap(struct mmap *map);

union perf_event *perf_mmap__read_forward(struct mmap *map);

-union perf_event *perf_mmap__read_event(struct mmap *map);
-
int perf_mmap__push(struct mmap *md, void *to,
		    int push(struct mmap *map, void *to, void *buf, size_t size));

-size_t perf_mmap__mmap_len(struct mmap *map);
+size_t mmap__mmap_len(struct mmap *map);

-int perf_mmap__read_init(struct mmap *md);
-void perf_mmap__read_done(struct mmap *map);
#endif /*__PERF_MMAP_H */
tools/perf/util/parse-regs-options.c
@@ -13,7 +13,7 @@ static int
__parse_regs(const struct option *opt, const char *str, int unset, bool intr)
{
	uint64_t *mode = (uint64_t *)opt->value;
-	const struct sample_reg *r;
+	const struct sample_reg *r = NULL;
	char *s, *os = NULL, *p;
	int ret = -1;
	uint64_t mask;
@@ -46,19 +46,23 @@ __parse_regs(const struct option *opt, const char *str, int unset, bool intr)

			if (!strcmp(s, "?")) {
				fprintf(stderr, "available registers: ");
+#ifdef HAVE_PERF_REGS_SUPPORT
				for (r = sample_reg_masks; r->name; r++) {
					if (r->mask & mask)
						fprintf(stderr, "%s ", r->name);
				}
+#endif
				fputc('\n', stderr);
				/* just printing available regs */
				return -1;
			}
+#ifdef HAVE_PERF_REGS_SUPPORT
			for (r = sample_reg_masks; r->name; r++) {
				if ((r->mask & mask) && !strcasecmp(s, r->name))
					break;
			}
-			if (!r->name) {
+#endif
+			if (!r || !r->name) {
				ui__warning("Unknown register \"%s\", check man page or run \"perf record %s?\"\n",
					    s, intr ? "-I" : "--user-regs=");
				goto error;
tools/perf/util/perf_regs.c
@@ -3,10 +3,6 @@
#include "perf_regs.h"
#include "event.h"

-const struct sample_reg __weak sample_reg_masks[] = {
-	SMPL_REG_END
-};
-
int __weak arch_sdt_arg_parse_op(char *old_op __maybe_unused,
				 char **new_op __maybe_unused)
{

tools/perf/util/perf_regs.h
@@ -15,8 +15,6 @@ struct sample_reg {
#define SMPL_REG2(n, b) { .name = #n, .mask = 3ULL << (b) }
#define SMPL_REG_END { .name = NULL }

-extern const struct sample_reg sample_reg_masks[];
-
enum {
	SDT_ARG_VALID = 0,
	SDT_ARG_SKIP,
@@ -27,6 +25,8 @@ uint64_t arch__intr_reg_mask(void);
uint64_t arch__user_reg_mask(void);

#ifdef HAVE_PERF_REGS_SUPPORT
+extern const struct sample_reg sample_reg_masks[];
+
#include <perf_regs.h>

#define DWARF_MINIMAL_REGS ((1ULL << PERF_REG_IP) | (1ULL << PERF_REG_SP))
tools/perf/util/python.c
@@ -6,6 +6,7 @@
#include <linux/err.h>
#include <perf/cpumap.h>
#include <traceevent/event-parse.h>
+#include <perf/mmap.h>
#include "evlist.h"
#include "callchain.h"
#include "evsel.h"
@@ -1022,10 +1023,10 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
	if (!md)
		return NULL;

-	if (perf_mmap__read_init(md) < 0)
+	if (perf_mmap__read_init(&md->core) < 0)
		goto end;

-	event = perf_mmap__read_event(md);
+	event = perf_mmap__read_event(&md->core);
	if (event != NULL) {
		PyObject *pyevent = pyrf_event__new(event);
		struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
@@ -1045,7 +1046,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
		err = perf_evsel__parse_sample(evsel, event, &pevent->sample);

		/* Consume the event only after we parsed it out. */
-		perf_mmap__consume(md);
+		perf_mmap__consume(&md->core);

		if (err)
			return PyErr_Format(PyExc_OSError,
tools/perf/util/session.c
@@ -2355,35 +2355,6 @@ void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
	fprintf(fp, "# ========\n#\n");
}

-
-int __perf_session__set_tracepoints_handlers(struct perf_session *session,
-					     const struct evsel_str_handler *assocs,
-					     size_t nr_assocs)
-{
-	struct evsel *evsel;
-	size_t i;
-	int err;
-
-	for (i = 0; i < nr_assocs; i++) {
-		/*
-		 * Adding a handler for an event not in the session,
-		 * just ignore it.
-		 */
-		evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
-		if (evsel == NULL)
-			continue;
-
-		err = -EEXIST;
-		if (evsel->handler != NULL)
-			goto out;
-		evsel->handler = assocs[i].handler;
-	}
-
-	err = 0;
-out:
-	return err;
-}
-
int perf_event__process_id_index(struct perf_session *session,
				 union perf_event *event)
{
tools/perf/util/session.h
@@ -120,12 +120,8 @@ void perf_session__fprintf_info(struct perf_session *s, FILE *fp, bool full);

struct evsel_str_handler;

-int __perf_session__set_tracepoints_handlers(struct perf_session *session,
-					     const struct evsel_str_handler *assocs,
-					     size_t nr_assocs);
-
#define perf_session__set_tracepoints_handlers(session, array) \
-	__perf_session__set_tracepoints_handlers(session, array, ARRAY_SIZE(array))
+	__evlist__set_tracepoints_handlers(session->evlist, array, ARRAY_SIZE(array))

extern volatile int session_done;
tools/perf/util/sort.h
@@ -10,6 +10,8 @@
#include "callchain.h"
#include "values.h"
#include "hist.h"
+#include "stat.h"
+#include "spark.h"

struct option;
struct thread;
@@ -71,6 +73,8 @@ struct hist_entry_diff {
		/* PERF_HPP_DIFF__CYCLES */
		s64	cycles;
	};
+	struct stats	stats;
+	unsigned long	svals[NUM_SPARKS];
};

struct hist_entry_ops {
tools/perf/util/spark.c (new file, 34 lines)
@@ -0,0 +1,34 @@
#include <stdio.h>
#include <limits.h>
#include <string.h>
#include <stdlib.h>
#include "spark.h"
#include "stat.h"

#define SPARK_SHIFT 8

/* Print spark lines into bf for the numval values in val. */
int print_spark(char *bf, int size, unsigned long *val, int numval)
{
	static const char *ticks[NUM_SPARKS] = {
		"▁", "▂", "▃", "▄", "▅", "▆", "▇", "█"
	};
	int i, printed = 0;
	unsigned long min = ULONG_MAX, max = 0, f;

	for (i = 0; i < numval; i++) {
		if (val[i] < min)
			min = val[i];
		if (val[i] > max)
			max = val[i];
	}
	f = ((max - min) << SPARK_SHIFT) / (NUM_SPARKS - 1);
	if (f < 1)
		f = 1;
	for (i = 0; i < numval; i++) {
		printed += scnprintf(bf + printed, size - printed, "%s",
				     ticks[((val[i] - min) << SPARK_SHIFT) / f]);
	}

	return printed;
}
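A small usage sketch, linked against the new file; given the scaling above, these eight values should render as shown in the comment:

	#include <stdio.h>
	#include "spark.h"

	int main(void)
	{
		unsigned long vals[] = { 1, 5, 22, 13, 53, 30, 4, 100 };
		char bf[128];

		print_spark(bf, sizeof(bf), vals, 8);
		printf("%s\n", bf);	/* expected: ▁▁▂▁▄▃▁█ */
		return 0;
	}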
tools/perf/util/spark.h (new file, 8 lines)
@@ -0,0 +1,8 @@
#ifndef SPARK_H
#define SPARK_H 1

#define NUM_SPARKS 8

int print_spark(char *bf, int size, unsigned long *val, int numval);

#endif
tools/perf/util/symbol.h
@@ -11,6 +11,7 @@
#include <stdio.h>
#include "path.h"
#include "symbol_conf.h"
+#include "spark.h"

#ifdef HAVE_LIBELF_SUPPORT
#include <libelf.h>
@@ -111,6 +112,7 @@ struct block_info {
	u64			end;
	u64			cycles;
	u64			cycles_aggr;
+	s64			cycles_spark[NUM_SPARKS];
	int			num;
	int			num_aggr;
	refcount_t		refcnt;