perf tools fixes for v5.19: 4th batch
- Fix SIGSEGV when processing syscall args in perf.data files in 'perf trace'.

- Sync kvm, msr-index and cpufeatures headers with the kernel sources.

- Fix 'convert perf time to TSC' 'perf test':
  - No need to open events twice.
  - Fix finding correct event on hybrid systems.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-----BEGIN PGP SIGNATURE-----

iHQEABYKAB0WIQR2GiIUctdOfX2qHhGyPKLppCJ+JwUCYtQvSAAKCRCyPKLppCJ+
J1RLAQCX7wriY00kluSNoeCxk1I9r9F64AJXPsRV/vE/j+Xc1gD4rA+l5QYG6Ja/
ICUXmTbaOjsUhAMNY+aw+1bwuRUxAA==
=El3Z
-----END PGP SIGNATURE-----

Merge tag 'perf-tools-fixes-for-v5.19-2022-07-17' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux

Pull perf tools fixes from Arnaldo Carvalho de Melo:

 - Fix SIGSEGV when processing syscall args in perf.data files in 'perf trace'

 - Sync kvm, msr-index and cpufeatures headers with the kernel sources

 - Fix 'convert perf time to TSC' 'perf test':
     - No need to open events twice
     - Fix finding correct event on hybrid systems

* tag 'perf-tools-fixes-for-v5.19-2022-07-17' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux:
  perf trace: Fix SIGSEGV when processing syscall args
  perf tests: Fix Convert perf time to TSC test for hybrid
  perf tests: Stop Convert perf time to TSC test opening events twice
  tools arch x86: Sync the msr-index.h copy with the kernel sources
  tools headers cpufeatures: Sync with the kernel sources
  tools headers UAPI: Sync linux/kvm.h with the kernel sources
commit f7f4da303d
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
@@ -203,8 +203,8 @@
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_XCOMPACTED ( 7*32+10) /* "" Use compacted XSTATE (XSAVES or XSAVEC) */
 #define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
-#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
-#define X86_FEATURE_RETPOLINE_LFENCE ( 7*32+13) /* "" Use LFENCE for Spectre variant 2 */
+#define X86_FEATURE_KERNEL_IBRS ( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */
+#define X86_FEATURE_RSB_VMEXIT ( 7*32+13) /* "" Fill RSB on VM-Exit */
 #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */
 #define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
@@ -296,6 +296,12 @@
 #define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */
 #define X86_FEATURE_SGX1 (11*32+ 8) /* "" Basic SGX */
 #define X86_FEATURE_SGX2 (11*32+ 9) /* "" SGX Enclave Dynamic Memory Management (EDMM) */
+#define X86_FEATURE_ENTRY_IBPB (11*32+10) /* "" Issue an IBPB on kernel entry */
+#define X86_FEATURE_RRSBA_CTRL (11*32+11) /* "" RET prediction control */
+#define X86_FEATURE_RETPOLINE (11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* "" Use LFENCE for Spectre variant 2 */
+#define X86_FEATURE_RETHUNK (11*32+14) /* "" Use REturn THUNK */
+#define X86_FEATURE_UNRET (11*32+15) /* "" AMD BTB untrain return */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
@@ -316,6 +322,7 @@
 #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
 #define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
 #define X86_FEATURE_CPPC (13*32+27) /* Collaborative Processor Performance Control */
+#define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */
 #define X86_FEATURE_BRS (13*32+31) /* Branch Sampling available */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
@@ -447,5 +454,6 @@
 #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
 #define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
 #define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
+#define X86_BUG_RETBLEED X86_BUG(26) /* CPU is affected by RETBleed */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
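For reference, the (word*32+bit) values above are how cpufeatures.h packs a feature's 32-bit word index and bit position into a single number, which is why RETPOLINE's move from word 7 to word 11 changes its value while the bit arithmetic stays the same. A minimal standalone sketch of that encoding (illustration only, not kernel code), using values from the diff:

#include <stdio.h>

/* Values copied from the cpufeatures.h diff above */
#define X86_FEATURE_RETPOLINE	(11*32+12)
#define X86_FEATURE_RETHUNK	(11*32+14)
#define X86_FEATURE_KERNEL_IBRS	( 7*32+12)

static void decode(const char *name, unsigned int f)
{
	/* word index and bit within the word fall out of /32 and %32 */
	printf("%-24s value %3u -> word %2u, bit %2u\n", name, f, f / 32, f % 32);
}

int main(void)
{
	decode("X86_FEATURE_RETPOLINE", X86_FEATURE_RETPOLINE);
	decode("X86_FEATURE_RETHUNK", X86_FEATURE_RETHUNK);
	decode("X86_FEATURE_KERNEL_IBRS", X86_FEATURE_KERNEL_IBRS);
	return 0;
}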
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
@@ -50,6 +50,25 @@
 # define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
 #endif
 
+#ifdef CONFIG_RETPOLINE
+# define DISABLE_RETPOLINE 0
+#else
+# define DISABLE_RETPOLINE ((1 << (X86_FEATURE_RETPOLINE & 31)) | \
+			    (1 << (X86_FEATURE_RETPOLINE_LFENCE & 31)))
+#endif
+
+#ifdef CONFIG_RETHUNK
+# define DISABLE_RETHUNK 0
+#else
+# define DISABLE_RETHUNK (1 << (X86_FEATURE_RETHUNK & 31))
+#endif
+
+#ifdef CONFIG_CPU_UNRET_ENTRY
+# define DISABLE_UNRET 0
+#else
+# define DISABLE_UNRET (1 << (X86_FEATURE_UNRET & 31))
+#endif
+
 #ifdef CONFIG_INTEL_IOMMU_SVM
 # define DISABLE_ENQCMD 0
 #else
@@ -82,7 +101,7 @@
 #define DISABLED_MASK8 (DISABLE_TDX_GUEST)
 #define DISABLED_MASK9 (DISABLE_SGX)
 #define DISABLED_MASK10 0
-#define DISABLED_MASK11 0
+#define DISABLED_MASK11 (DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET)
 #define DISABLED_MASK12 0
 #define DISABLED_MASK13 0
 #define DISABLED_MASK14 0
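The DISABLED_MASK11 update is what makes the new word-11 bits visible to compile-time feature tests: when CONFIG_RETPOLINE, CONFIG_RETHUNK or CONFIG_CPU_UNRET_ENTRY is off, the corresponding bits land in the mask, and tests on those features can be folded to false at build time. A standalone sketch of that idea, with the mask check deliberately simplified from the kernel's cpu_feature_enabled() machinery:

#include <stdio.h>

/* Feature values from the cpufeatures.h diff; assume the CONFIG_*
 * options are all off, so every DISABLE_* bit is set. */
#define X86_FEATURE_RETPOLINE		(11*32+12)
#define X86_FEATURE_RETPOLINE_LFENCE	(11*32+13)
#define X86_FEATURE_RETHUNK		(11*32+14)
#define X86_FEATURE_UNRET		(11*32+15)

#define DISABLE_RETPOLINE	((1u << (X86_FEATURE_RETPOLINE & 31)) | \
				 (1u << (X86_FEATURE_RETPOLINE_LFENCE & 31)))
#define DISABLE_RETHUNK		(1u << (X86_FEATURE_RETHUNK & 31))
#define DISABLE_UNRET		(1u << (X86_FEATURE_UNRET & 31))

#define DISABLED_MASK11		(DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET)

/* Simplified build-time check: is this word-11 feature compiled out? */
#define FEATURE_DISABLED(f)	((f) / 32 == 11 && \
				 (DISABLED_MASK11 & (1u << ((f) % 32))))

int main(void)
{
	printf("RETHUNK compiled out: %d\n", FEATURE_DISABLED(X86_FEATURE_RETHUNK));
	printf("UNRET compiled out:   %d\n", FEATURE_DISABLED(X86_FEATURE_UNRET));
	return 0;
}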
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
@@ -95,6 +95,7 @@
 #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
 #define ARCH_CAP_RDCL_NO BIT(0) /* Not susceptible to Meltdown */
 #define ARCH_CAP_IBRS_ALL BIT(1) /* Enhanced IBRS support */
+#define ARCH_CAP_RSBA BIT(2) /* RET may use alternative branch predictors */
 #define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH BIT(3) /* Skip L1D flush on vmentry */
 #define ARCH_CAP_SSB_NO BIT(4) /*
  * Not susceptible to Speculative Store Bypass
@@ -576,6 +577,9 @@
 /* Fam 17h MSRs */
 #define MSR_F17H_IRPERF 0xc00000e9
 
+#define MSR_ZEN2_SPECTRAL_CHICKEN 0xc00110e3
+#define MSR_ZEN2_SPECTRAL_CHICKEN_BIT BIT_ULL(1)
+
 /* Fam 16h MSRs */
 #define MSR_F16H_L2I_PERF_CTL 0xc0010230
 #define MSR_F16H_L2I_PERF_CTR 0xc0010231
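ARCH_CAP_RSBA is a new bit in MSR_IA32_ARCH_CAPABILITIES (0x10a); in the kernel the register would be read with rdmsrl() and tested bit by bit. A userspace-runnable sketch of the same decoding, using a hypothetical raw value purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define BIT(n)			(1ULL << (n))

/* Bits from the msr-index.h diff above */
#define ARCH_CAP_RDCL_NO	BIT(0)	/* Not susceptible to Meltdown */
#define ARCH_CAP_IBRS_ALL	BIT(1)	/* Enhanced IBRS support */
#define ARCH_CAP_RSBA		BIT(2)	/* RET may use alternative branch predictors */

int main(void)
{
	uint64_t ia32_cap = BIT(0) | BIT(2);	/* hypothetical MSR value */

	printf("RDCL_NO:  %d\n", !!(ia32_cap & ARCH_CAP_RDCL_NO));
	printf("IBRS_ALL: %d\n", !!(ia32_cap & ARCH_CAP_IBRS_ALL));
	printf("RSBA:     %d\n", !!(ia32_cap & ARCH_CAP_RSBA));
	return 0;
}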
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
@@ -2083,6 +2083,7 @@ struct kvm_stats_header {
 #define KVM_STATS_UNIT_BYTES (0x1 << KVM_STATS_UNIT_SHIFT)
 #define KVM_STATS_UNIT_SECONDS (0x2 << KVM_STATS_UNIT_SHIFT)
 #define KVM_STATS_UNIT_CYCLES (0x3 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_BOOLEAN (0x4 << KVM_STATS_UNIT_SHIFT)
 #define KVM_STATS_UNIT_MAX KVM_STATS_UNIT_CYCLES
 
 #define KVM_STATS_BASE_SHIFT 8
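KVM_STATS_UNIT_BOOLEAN extends the 4-bit unit field that each stats descriptor packs into its flags word alongside the type and base fields (KVM_STATS_BASE_SHIFT is 8, as shown above; the unit field sits at bit 4 in the UAPI header). A standalone sketch of extracting the unit from a descriptor's flags; the sample flags value is made up:

#include <stdio.h>

/* Field layout as in linux/kvm.h: 4-bit unit field at bit 4 */
#define KVM_STATS_UNIT_SHIFT	4
#define KVM_STATS_UNIT_MASK	(0xF << KVM_STATS_UNIT_SHIFT)

#define KVM_STATS_UNIT_NONE	(0x0 << KVM_STATS_UNIT_SHIFT)
#define KVM_STATS_UNIT_BYTES	(0x1 << KVM_STATS_UNIT_SHIFT)
#define KVM_STATS_UNIT_SECONDS	(0x2 << KVM_STATS_UNIT_SHIFT)
#define KVM_STATS_UNIT_CYCLES	(0x3 << KVM_STATS_UNIT_SHIFT)
#define KVM_STATS_UNIT_BOOLEAN	(0x4 << KVM_STATS_UNIT_SHIFT)

int main(void)
{
	unsigned int flags = KVM_STATS_UNIT_BOOLEAN;	/* hypothetical descriptor flags */

	switch (flags & KVM_STATS_UNIT_MASK) {
	case KVM_STATS_UNIT_BOOLEAN:
		printf("unit: boolean\n");
		break;
	case KVM_STATS_UNIT_CYCLES:
		printf("unit: cycles\n");
		break;
	default:
		printf("unit: other\n");
		break;
	}
	return 0;
}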
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
@@ -4280,6 +4280,7 @@ static int trace__replay(struct trace *trace)
 		goto out;
 
 	evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_enter");
+	trace->syscalls.events.sys_enter = evsel;
 	/* older kernels have syscalls tp versus raw_syscalls */
 	if (evsel == NULL)
 		evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_enter");
@@ -4292,6 +4293,7 @@ static int trace__replay(struct trace *trace)
 	}
 
 	evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_exit");
+	trace->syscalls.events.sys_exit = evsel;
 	if (evsel == NULL)
 		evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_exit");
 	if (evsel &&
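Those two one-line additions appear to be the whole SIGSEGV fix: trace__replay() looked up the sys_enter/sys_exit evsels but never stored them in trace->syscalls.events, which later syscall-arg processing consults. A standalone sketch of the cache-then-fall-back lookup pattern; the types and names here are illustrative stand-ins, not the perf API:

#include <stdio.h>
#include <stddef.h>
#include <string.h>

/* Stand-in for an evsel in a loaded perf.data session; on older
 * kernels only the "syscalls:*" tracepoints exist. */
struct evsel { const char *name; };

static struct evsel session_evsels[] = {
	{ "syscalls:sys_enter" },
	{ "syscalls:sys_exit" },
};

static struct evsel *find_tracepoint(const char *name)
{
	for (size_t i = 0; i < sizeof(session_evsels) / sizeof(session_evsels[0]); i++)
		if (!strcmp(session_evsels[i].name, name))
			return &session_evsels[i];
	return NULL;
}

int main(void)
{
	struct { struct evsel *sys_enter; } events = { NULL };
	struct evsel *evsel;

	/* Look up the preferred tracepoint, cache the result (possibly
	 * NULL) where later processing will look for it, then fall back
	 * to the older tracepoint name. */
	evsel = find_tracepoint("raw_syscalls:sys_enter");
	events.sys_enter = evsel;
	if (evsel == NULL)
		evsel = find_tracepoint("syscalls:sys_enter");

	printf("using:  %s\n", evsel ? evsel->name : "(none)");
	printf("cached: %s\n", events.sys_enter ? events.sys_enter->name
					        : "NULL - consumers must check");
	return 0;
}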
diff --git a/tools/perf/tests/perf-time-to-tsc.c b/tools/perf/tests/perf-time-to-tsc.c
@@ -20,8 +20,6 @@
 #include "tsc.h"
 #include "mmap.h"
 #include "tests.h"
-#include "pmu.h"
-#include "pmu-hybrid.h"
 
 /*
  * Except x86_64/i386 and Arm64, other archs don't support TSC in perf. Just
@@ -106,28 +104,21 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
 
 	evlist__config(evlist, &opts, NULL);
 
-	evsel = evlist__first(evlist);
-
-	evsel->core.attr.comm = 1;
-	evsel->core.attr.disabled = 1;
-	evsel->core.attr.enable_on_exec = 0;
-
-	/*
-	 * For hybrid "cycles:u", it creates two events.
-	 * Init the second evsel here.
-	 */
-	if (perf_pmu__has_hybrid() && perf_pmu__hybrid_mounted("cpu_atom")) {
-		evsel = evsel__next(evsel);
+	/* For hybrid "cycles:u", it creates two events */
+	evlist__for_each_entry(evlist, evsel) {
 		evsel->core.attr.comm = 1;
 		evsel->core.attr.disabled = 1;
 		evsel->core.attr.enable_on_exec = 0;
 	}
 
-	if (evlist__open(evlist) == -ENOENT) {
-		err = TEST_SKIP;
+	ret = evlist__open(evlist);
+	if (ret < 0) {
+		if (ret == -ENOENT)
+			err = TEST_SKIP;
+		else
+			pr_debug("evlist__open() failed\n");
 		goto out_err;
 	}
 
-	CHECK__(evlist__open(evlist));
-
 	CHECK__(evlist__mmap(evlist, UINT_MAX));
@@ -167,10 +158,12 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
 			goto next_event;
 
 		if (strcmp(event->comm.comm, comm1) == 0) {
+			CHECK_NOT_NULL__(evsel = evlist__event2evsel(evlist, event));
 			CHECK__(evsel__parse_sample(evsel, event, &sample));
 			comm1_time = sample.time;
 		}
 		if (strcmp(event->comm.comm, comm2) == 0) {
+			CHECK_NOT_NULL__(evsel = evlist__event2evsel(evlist, event));
 			CHECK__(evsel__parse_sample(evsel, event, &sample));
 			comm2_time = sample.time;
 		}
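The CHECK_NOT_NULL__ additions matter on hybrid systems: "cycles:u" opens one evsel per hybrid PMU (cpu_core and cpu_atom), so a recorded event must be parsed with the evsel that actually produced it, which is what evlist__event2evsel() resolves. A standalone sketch of that matching step, with the perf internals replaced by illustrative stand-ins:

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-ins: two evsels as created for hybrid "cycles:u" */
struct evsel { const char *pmu; unsigned long long id; };

static struct evsel evsels[] = {
	{ "cpu_core", 100 },
	{ "cpu_atom", 200 },
};

/* Match an event to its evsel by sample id, roughly what
 * evlist__event2evsel() does for real perf events. */
static struct evsel *event2evsel(unsigned long long sample_id)
{
	for (size_t i = 0; i < sizeof(evsels) / sizeof(evsels[0]); i++)
		if (evsels[i].id == sample_id)
			return &evsels[i];
	return NULL;
}

int main(void)
{
	unsigned long long sample_id = 200;	/* hypothetical event from the atom PMU */
	struct evsel *evsel = event2evsel(sample_id);

	if (evsel == NULL) {
		fprintf(stderr, "no matching evsel, cannot parse sample\n");
		return 1;
	}
	/* Parsing with the matching evsel avoids applying the wrong
	 * sample format on hybrid systems. */
	printf("parse sample with %s evsel\n", evsel->pmu);
	return 0;
}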