From 87195a1ee332add27bd51448c6b54aad551a28f5 Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko
Date: Tue, 10 Sep 2024 10:43:12 -0700
Subject: [PATCH 01/32] uprobes: switch to RCU Tasks Trace flavor for better performance
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This patch switches uprobes SRCU usage to RCU Tasks Trace flavor, which
is optimized for more lightweight and quick readers (at the expense of
slower writers, which for uprobes is a fine tradeoff) and has better
performance and scalability with the number of CPUs.

Similarly to the baseline vs SRCU comparison, we've benchmarked the
SRCU-based implementation vs the RCU Tasks Trace implementation.

SRCU
====
uprobe-nop      ( 1 cpus):    3.276 ± 0.005M/s  (  3.276M/s/cpu)
uprobe-nop      ( 2 cpus):    4.125 ± 0.002M/s  (  2.063M/s/cpu)
uprobe-nop      ( 4 cpus):    7.713 ± 0.002M/s  (  1.928M/s/cpu)
uprobe-nop      ( 8 cpus):    8.097 ± 0.006M/s  (  1.012M/s/cpu)
uprobe-nop      (16 cpus):    6.501 ± 0.056M/s  (  0.406M/s/cpu)
uprobe-nop      (32 cpus):    4.398 ± 0.084M/s  (  0.137M/s/cpu)
uprobe-nop      (64 cpus):    6.452 ± 0.000M/s  (  0.101M/s/cpu)

uretprobe-nop   ( 1 cpus):    2.055 ± 0.001M/s  (  2.055M/s/cpu)
uretprobe-nop   ( 2 cpus):    2.677 ± 0.000M/s  (  1.339M/s/cpu)
uretprobe-nop   ( 4 cpus):    4.561 ± 0.003M/s  (  1.140M/s/cpu)
uretprobe-nop   ( 8 cpus):    5.291 ± 0.002M/s  (  0.661M/s/cpu)
uretprobe-nop   (16 cpus):    5.065 ± 0.019M/s  (  0.317M/s/cpu)
uretprobe-nop   (32 cpus):    3.622 ± 0.003M/s  (  0.113M/s/cpu)
uretprobe-nop   (64 cpus):    3.723 ± 0.002M/s  (  0.058M/s/cpu)

RCU Tasks Trace
===============
uprobe-nop      ( 1 cpus):    3.396 ± 0.002M/s  (  3.396M/s/cpu)
uprobe-nop      ( 2 cpus):    4.271 ± 0.006M/s  (  2.135M/s/cpu)
uprobe-nop      ( 4 cpus):    8.499 ± 0.015M/s  (  2.125M/s/cpu)
uprobe-nop      ( 8 cpus):   10.355 ± 0.028M/s  (  1.294M/s/cpu)
uprobe-nop      (16 cpus):    7.615 ± 0.099M/s  (  0.476M/s/cpu)
uprobe-nop      (32 cpus):    4.430 ± 0.007M/s  (  0.138M/s/cpu)
uprobe-nop      (64 cpus):    6.887 ± 0.020M/s  (  0.108M/s/cpu)

uretprobe-nop   ( 1 cpus):    2.174 ± 0.001M/s  (  2.174M/s/cpu)
uretprobe-nop   ( 2 cpus):    2.853 ± 0.001M/s  (  1.426M/s/cpu)
uretprobe-nop   ( 4 cpus):    4.913 ± 0.002M/s  (  1.228M/s/cpu)
uretprobe-nop   ( 8 cpus):    5.883 ± 0.002M/s  (  0.735M/s/cpu)
uretprobe-nop   (16 cpus):    5.147 ± 0.001M/s  (  0.322M/s/cpu)
uretprobe-nop   (32 cpus):    3.738 ± 0.008M/s  (  0.117M/s/cpu)
uretprobe-nop   (64 cpus):    4.397 ± 0.002M/s  (  0.069M/s/cpu)

Peak throughput for uprobes increases from 8 mln/s to 10.3 mln/s
(+28%!), and for uretprobes from 5.3 mln/s to 5.8 mln/s (+11%), as we
have more work to do on the uretprobe side.

Even single-thread (no contention) performance is slightly better:
3.276 mln/s to 3.396 mln/s (+3.5%) for uprobes, and 2.055 mln/s to
2.174 mln/s (+5.8%) for uretprobes.

We also select TASKS_TRACE_RCU for UPROBES in Kconfig due to the new
dependency.
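For orientation, the read-side conversion this patch performs boils down to
the pattern below. This is an illustrative sketch only, not part of the
patch; both APIs are existing kernel interfaces, but the functions and the
SRCU domain name here are made up for the example:

	#include <linux/srcu.h>
	#include <linux/rcupdate_trace.h>

	DEFINE_STATIC_SRCU(example_srcu);

	/* Before: SRCU readers carry a per-domain index. */
	static void reader_srcu(void)
	{
		int idx = srcu_read_lock(&example_srcu);
		/* ... dereference data freed via call_srcu() ... */
		srcu_read_unlock(&example_srcu, idx);
	}

	/* After: RCU Tasks Trace readers are lighter weight and index-free. */
	static void reader_tasks_trace(void)
	{
		rcu_read_lock_trace();
		/* ... dereference data freed via call_rcu_tasks_trace() ... */
		rcu_read_unlock_trace();
	}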
Signed-off-by: Andrii Nakryiko
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Oleg Nesterov
Link: https://lkml.kernel.org/r/20240910174312.3646590-1-andrii@kernel.org
---
 arch/Kconfig            |  1 +
 kernel/events/uprobes.c | 38 ++++++++++++++++----------------------
 2 files changed, 17 insertions(+), 22 deletions(-)

diff --git a/arch/Kconfig b/arch/Kconfig
index 8af374ea1adc..5b5c21676a0f 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -135,6 +135,7 @@ config KPROBES_ON_FTRACE
 config UPROBES
 	def_bool n
 	depends on ARCH_SUPPORTS_UPROBES
+	select TASKS_TRACE_RCU
 	help
 	  Uprobes is the user-space counterpart to kprobes: they
 	  enable instrumentation applications (such as 'perf probe')

diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 4b52cb2ae6d6..5106dc181387 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -26,6 +26,7 @@
 #include
 #include
 #include
+#include <linux/rcupdate_trace.h>
 #include

@@ -42,8 +43,6 @@ static struct rb_root uprobes_tree = RB_ROOT;
 static DEFINE_RWLOCK(uprobes_treelock);	/* serialize rbtree access */
 static seqcount_rwlock_t uprobes_seqcount = SEQCNT_RWLOCK_ZERO(uprobes_seqcount, &uprobes_treelock);

-DEFINE_STATIC_SRCU(uprobes_srcu);
-
 #define UPROBES_HASH_SZ	13
 /* serialize uprobe->pending_list */
 static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];

@@ -651,7 +650,7 @@ static void put_uprobe(struct uprobe *uprobe)
 	delayed_uprobe_remove(uprobe, NULL);
 	mutex_unlock(&delayed_uprobe_lock);

-	call_srcu(&uprobes_srcu, &uprobe->rcu, uprobe_free_rcu);
+	call_rcu_tasks_trace(&uprobe->rcu, uprobe_free_rcu);
 }

 static __always_inline
@@ -706,7 +705,7 @@ static struct uprobe *find_uprobe_rcu(struct inode *inode, loff_t offset)
 	struct rb_node *node;
 	unsigned int seq;

-	lockdep_assert(srcu_read_lock_held(&uprobes_srcu));
+	lockdep_assert(rcu_read_lock_trace_held());

 	do {
 		seq = read_seqcount_begin(&uprobes_seqcount);

@@ -934,8 +933,7 @@ static bool filter_chain(struct uprobe *uprobe, struct mm_struct *mm)
 	bool ret = false;

 	down_read(&uprobe->consumer_rwsem);
-	list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
-				 srcu_read_lock_held(&uprobes_srcu)) {
+	list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
 		ret = consumer_filter(uc, mm);
 		if (ret)
 			break;

@@ -1156,7 +1154,7 @@ void uprobe_unregister_sync(void)
 	 * unlucky enough caller can free consumer's memory and cause
 	 * handler_chain() or handle_uretprobe_chain() to do an use-after-free.
 	 */
-	synchronize_srcu(&uprobes_srcu);
+	synchronize_rcu_tasks_trace();
 }
 EXPORT_SYMBOL_GPL(uprobe_unregister_sync);

@@ -1240,19 +1238,18 @@ EXPORT_SYMBOL_GPL(uprobe_register);
 int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool add)
 {
 	struct uprobe_consumer *con;
-	int ret = -ENOENT, srcu_idx;
+	int ret = -ENOENT;

 	down_write(&uprobe->register_rwsem);

-	srcu_idx = srcu_read_lock(&uprobes_srcu);
-	list_for_each_entry_srcu(con, &uprobe->consumers, cons_node,
-				 srcu_read_lock_held(&uprobes_srcu)) {
+	rcu_read_lock_trace();
+	list_for_each_entry_rcu(con, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
 		if (con == uc) {
 			ret = register_for_each_vma(uprobe, add ? uc : NULL);
 			break;
 		}
 	}
-	srcu_read_unlock(&uprobes_srcu, srcu_idx);
+	rcu_read_unlock_trace();

 	up_write(&uprobe->register_rwsem);

@@ -2134,8 +2131,7 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)

 	current->utask->auprobe = &uprobe->arch;

-	list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
-				 srcu_read_lock_held(&uprobes_srcu)) {
+	list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
 		int rc = 0;

 		if (uc->handler) {

@@ -2173,15 +2169,13 @@ handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
 {
 	struct uprobe *uprobe = ri->uprobe;
 	struct uprobe_consumer *uc;
-	int srcu_idx;

-	srcu_idx = srcu_read_lock(&uprobes_srcu);
-	list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
-				 srcu_read_lock_held(&uprobes_srcu)) {
+	rcu_read_lock_trace();
+	list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
 		if (uc->ret_handler)
 			uc->ret_handler(uc, ri->func, regs);
 	}
-	srcu_read_unlock(&uprobes_srcu, srcu_idx);
+	rcu_read_unlock_trace();
 }

 static struct return_instance *find_next_ret_chain(struct return_instance *ri)

@@ -2266,13 +2260,13 @@ static void handle_swbp(struct pt_regs *regs)
 {
 	struct uprobe *uprobe;
 	unsigned long bp_vaddr;
-	int is_swbp, srcu_idx;
+	int is_swbp;

 	bp_vaddr = uprobe_get_swbp_addr(regs);
 	if (bp_vaddr == uprobe_get_trampoline_vaddr())
 		return uprobe_handle_trampoline(regs);

-	srcu_idx = srcu_read_lock(&uprobes_srcu);
+	rcu_read_lock_trace();

 	uprobe = find_active_uprobe_rcu(bp_vaddr, &is_swbp);
 	if (!uprobe) {

@@ -2330,7 +2324,7 @@ static void handle_swbp(struct pt_regs *regs)

 out:
 	/* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */
-	srcu_read_unlock(&uprobes_srcu, srcu_idx);
+	rcu_read_unlock_trace();
 }

 /*

From 79390db9eb32b2ba63c6be9fb83f12617259011d Mon Sep 17 00:00:00 2001
From: Dapeng Mi
Date: Tue, 20 Aug 2024 07:38:50 +0000
Subject: [PATCH 02/32] perf/x86: Refine hybrid_pmu_type definition

Use macros instead of magic numbers to define hybrid_pmu_type, and
remove X86_HYBRID_NUM_PMUS since it's never used.

Signed-off-by: Dapeng Mi
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Kan Liang
Tested-by: Yongwei Ma
Link: https://lkml.kernel.org/r/20240820073853.1974746-2-dapeng1.mi@linux.intel.com
---
 arch/x86/events/perf_event.h | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index ac1182141bf6..fdd7d0369d42 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -674,18 +674,16 @@ enum hybrid_cpu_type {
 	HYBRID_INTEL_CORE	= 0x40,
 };

-enum hybrid_pmu_type {
-	not_hybrid,
-	hybrid_small	= BIT(0),
-	hybrid_big	= BIT(1),
-
-	hybrid_big_small = hybrid_big | hybrid_small, /* only used for matching */
-};
-
 #define X86_HYBRID_PMU_ATOM_IDX		0
 #define X86_HYBRID_PMU_CORE_IDX		1

-#define X86_HYBRID_NUM_PMUS		2
+enum hybrid_pmu_type {
+	not_hybrid,
+	hybrid_small	= BIT(X86_HYBRID_PMU_ATOM_IDX),
+	hybrid_big	= BIT(X86_HYBRID_PMU_CORE_IDX),
+
+	hybrid_big_small = hybrid_big | hybrid_small, /* only used for matching */
+};

 struct x86_hybrid_pmu {
 	struct pmu			pmu;

From 2eb2802a41a222bf8d78a88f193ce665071c869e Mon Sep 17 00:00:00 2001
From: Dapeng Mi
Date: Tue, 20 Aug 2024 07:38:51 +0000
Subject: [PATCH 03/32] x86/cpu/intel: Define helper to get CPU core native ID

Define the helper get_this_hybrid_cpu_native_id() to return the CPU
core native ID. This native ID, combined with the core type, can be
used to uniquely identify the uarch of a CPU core.
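As a quick reference, the bit layout these helpers rely on can be sketched
as follows. The fragment mirrors the helpers in the diff below and is
illustrative only, not part of the patch:

	/* CPUID.1AH.EAX: bits [31:24] = core type, bits [23:0] = native model ID. */
	u32 eax = cpuid_eax(0x0000001a);
	u8 core_type  = eax >> X86_HYBRID_CPU_TYPE_ID_SHIFT;               /* bits 31:24 */
	u32 native_id = eax & (BIT_ULL(X86_HYBRID_CPU_TYPE_ID_SHIFT) - 1); /* bits 23:0 */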
Signed-off-by: Dapeng Mi
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Kan Liang
Tested-by: Yongwei Ma
Link: https://lkml.kernel.org/r/20240820073853.1974746-3-dapeng1.mi@linux.intel.com
---
 arch/x86/include/asm/cpu.h  |  6 ++++++
 arch/x86/kernel/cpu/intel.c | 15 +++++++++++++++
 2 files changed, 21 insertions(+)

diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index aa30fd8cad7f..5af69b5be2fb 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -32,6 +32,7 @@ extern bool handle_user_split_lock(struct pt_regs *regs, long error_code);
 extern bool handle_guest_split_lock(unsigned long ip);
 extern void handle_bus_lock(struct pt_regs *regs);
 u8 get_this_hybrid_cpu_type(void);
+u32 get_this_hybrid_cpu_native_id(void);
 #else
 static inline void __init sld_setup(struct cpuinfo_x86 *c) {}
 static inline bool handle_user_split_lock(struct pt_regs *regs, long error_code)
@@ -50,6 +51,11 @@ static inline u8 get_this_hybrid_cpu_type(void)
 {
 	return 0;
 }
+
+static inline u32 get_this_hybrid_cpu_native_id(void)
+{
+	return 0;
+}
 #endif
 #ifdef CONFIG_IA32_FEAT_CTL
 void init_ia32_feat_ctl(struct cpuinfo_x86 *c);

diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index e7656cbef68d..624397e43ac6 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -1299,3 +1299,18 @@ u8 get_this_hybrid_cpu_type(void)

 	return cpuid_eax(0x0000001a) >> X86_HYBRID_CPU_TYPE_ID_SHIFT;
 }
+
+/**
+ * get_this_hybrid_cpu_native_id() - Get the native id of this hybrid CPU
+ *
+ * Returns the uarch native ID [23:0] of a CPU in a hybrid processor.
+ * If the processor is not hybrid, returns 0.
+ */
+u32 get_this_hybrid_cpu_native_id(void)
+{
+	if (!cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
+		return 0;
+
+	return cpuid_eax(0x0000001a) &
+	       (BIT_ULL(X86_HYBRID_CPU_TYPE_ID_SHIFT) - 1);
+}

From 9f4a39757c81d532f64232702537c53ad4092a5e Mon Sep 17 00:00:00 2001
From: Dapeng Mi
Date: Tue, 20 Aug 2024 07:38:52 +0000
Subject: [PATCH 04/32] perf/x86/intel: Support hybrid PMU with multiple atom uarchs

The upcoming ARL-H hybrid processor contains 2 different atom uarchs
which have different PMU capabilities. To distinguish these atom
uarchs, CPUID.1AH.EAX[23:0] defines a native model ID which, combined
with the core type, can uniquely identify the uarch of a core.

Thus a 3rd hybrid pmu type "hybrid_tiny" is defined to mark the 2nd
atom uarch. The helper find_hybrid_pmu_for_cpu() compares the hybrid
pmu type and dynamically reads the core native id from the CPU to
identify the corresponding hybrid pmu structure.

Signed-off-by: Dapeng Mi
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Kan Liang
Tested-by: Yongwei Ma
Link: https://lkml.kernel.org/r/20240820073853.1974746-4-dapeng1.mi@linux.intel.com
---
 arch/x86/events/intel/core.c | 28 +++++++++++++++++++---------
 arch/x86/events/perf_event.h | 18 +++++++++++++++++-
 2 files changed, 36 insertions(+), 10 deletions(-)

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index d879478db3f5..6b9884ee96e9 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4924,17 +4924,26 @@ static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void)

 	/*
 	 * This essentially just maps between the 'hybrid_cpu_type'
-	 * and 'hybrid_pmu_type' enums:
+	 * and 'hybrid_pmu_type' enums except for ARL-H processor
+	 * which needs to compare atom uarch native id since ARL-H
+	 * contains two different atom uarchs.
 	 */
 	for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
 		enum hybrid_pmu_type pmu_type = x86_pmu.hybrid_pmu[i].pmu_type;
+		u32 native_id;

-		if (cpu_type == HYBRID_INTEL_CORE &&
-		    pmu_type == hybrid_big)
-			return &x86_pmu.hybrid_pmu[i];
-		if (cpu_type == HYBRID_INTEL_ATOM &&
-		    pmu_type == hybrid_small)
+		if (cpu_type == HYBRID_INTEL_CORE && pmu_type == hybrid_big)
 			return &x86_pmu.hybrid_pmu[i];
+		if (cpu_type == HYBRID_INTEL_ATOM) {
+			if (x86_pmu.num_hybrid_pmus == 2 && pmu_type == hybrid_small)
+				return &x86_pmu.hybrid_pmu[i];
+
+			native_id = get_this_hybrid_cpu_native_id();
+			if (native_id == skt_native_id && pmu_type == hybrid_small)
+				return &x86_pmu.hybrid_pmu[i];
+			if (native_id == cmt_native_id && pmu_type == hybrid_tiny)
+				return &x86_pmu.hybrid_pmu[i];
+		}
 	}

 	return NULL;

@@ -6238,8 +6247,9 @@ static inline int intel_pmu_v6_addr_offset(int index, bool eventsel)
 }

 static const struct { enum hybrid_pmu_type id; char *name; } intel_hybrid_pmu_type_map[] __initconst = {
-	{ hybrid_small, "cpu_atom" },
-	{ hybrid_big, "cpu_core" },
+	{ hybrid_small,	"cpu_atom" },
+	{ hybrid_big,	"cpu_core" },
+	{ hybrid_tiny,	"cpu_lowpower" },
 };

 static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)

@@ -6272,7 +6282,7 @@ static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)
 			0, x86_pmu_num_counters(&pmu->pmu), 0, 0);

 		pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
-		if (pmu->pmu_type & hybrid_small) {
+		if (pmu->pmu_type & hybrid_small_tiny) {
 			pmu->intel_cap.perf_metrics = 0;
 			pmu->intel_cap.pebs_output_pt_available = 1;
 			pmu->mid_ack = true;

diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index fdd7d0369d42..909467d6d7ed 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -668,6 +668,12 @@ enum {
 #define PERF_PEBS_DATA_SOURCE_GRT_MAX	0x10
 #define PERF_PEBS_DATA_SOURCE_GRT_MASK	(PERF_PEBS_DATA_SOURCE_GRT_MAX - 1)

+/*
+ * CPUID.1AH.EAX[31:0] uniquely identifies the microarchitecture
+ * of the core. Bits [31:24] indicate the core type (Core or Atom)
+ * and bits [23:0] indicate the native model ID of the core.
+ * Core type and native model ID are defined in the enumerations below.
+ */
 enum hybrid_cpu_type {
 	HYBRID_INTEL_NONE,
 	HYBRID_INTEL_ATOM	= 0x20,

@@ -676,13 +682,23 @@ enum hybrid_cpu_type {

 #define X86_HYBRID_PMU_ATOM_IDX		0
 #define X86_HYBRID_PMU_CORE_IDX		1
+#define X86_HYBRID_PMU_TINY_IDX		2

 enum hybrid_pmu_type {
 	not_hybrid,
 	hybrid_small	= BIT(X86_HYBRID_PMU_ATOM_IDX),
 	hybrid_big	= BIT(X86_HYBRID_PMU_CORE_IDX),
+	hybrid_tiny	= BIT(X86_HYBRID_PMU_TINY_IDX),

-	hybrid_big_small = hybrid_big | hybrid_small, /* only used for matching */
+	/* The values below are only used for matching */
+	hybrid_big_small	= hybrid_big | hybrid_small,
+	hybrid_small_tiny	= hybrid_small | hybrid_tiny,
+	hybrid_big_small_tiny	= hybrid_big | hybrid_small_tiny,
+};
+
+enum atom_native_id {
+	cmt_native_id		= 0x2,	/* Crestmont */
+	skt_native_id		= 0x3,	/* Skymont */
 };

 struct x86_hybrid_pmu {

From d3fe6f0a4372702e2cdabf19e03b815811671c7a Mon Sep 17 00:00:00 2001
From: Dapeng Mi
Date: Tue, 20 Aug 2024 07:38:53 +0000
Subject: [PATCH 05/32] perf/x86/intel: Add PMU support for ArrowLake-H

ArrowLake-H contains 3 different uarchs: LionCove, Skymont and
Crestmont. It differs from previous hybrid processors, which contain
only two kinds of uarchs.
This patch adds PMU support for the ArrowLake-H processor: it adds
ARL-H specific events which support the 3 kinds of uarchs, such as
td_retiring_arl_h, and extends some existing format attributes like
offcore_rsp to make them available on ARL-H as well.

Although these format attributes have been extended to support ARL-H,
they can still support the regular hybrid platforms with 2 kinds of
uarchs, since the helper hybrid_format_is_visible() filters PMU types
and only shows the format attribute for available PMUs.

Signed-off-by: Dapeng Mi
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Kan Liang
Tested-by: Yongwei Ma
Link: https://lkml.kernel.org/r/20240820073853.1974746-5-dapeng1.mi@linux.intel.com
---
 arch/x86/events/intel/core.c | 105 ++++++++++++++++++++++++++++++++++-
 arch/x86/events/intel/ds.c   |  21 +++++++
 arch/x86/events/perf_event.h |   4 ++
 3 files changed, 127 insertions(+), 3 deletions(-)

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 6b9884ee96e9..7ca40002a19b 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4599,6 +4599,28 @@ static inline bool erratum_hsw11(struct perf_event *event)
 		X86_CONFIG(.event=0xc0, .umask=0x01);
 }

+static struct event_constraint *
+arl_h_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+			    struct perf_event *event)
+{
+	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
+
+	if (pmu->pmu_type == hybrid_tiny)
+		return cmt_get_event_constraints(cpuc, idx, event);
+
+	return mtl_get_event_constraints(cpuc, idx, event);
+}
+
+static int arl_h_hw_config(struct perf_event *event)
+{
+	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
+
+	if (pmu->pmu_type == hybrid_tiny)
+		return intel_pmu_hw_config(event);
+
+	return adl_hw_config(event);
+}
+
 /*
  * The HSW11 requires a period larger than 100 which is the same as the BDM11.
  * A minimum period of 128 is enforced as well for the INST_RETIRED.ALL.

@@ -5974,6 +5996,37 @@ static struct attribute *lnl_hybrid_events_attrs[] = {
 	NULL
 };

+/* The event string must be in PMU IDX order. */
+EVENT_ATTR_STR_HYBRID(topdown-retiring,
+		      td_retiring_arl_h,
+		      "event=0xc2,umask=0x02;event=0x00,umask=0x80;event=0xc2,umask=0x0",
+		      hybrid_big_small_tiny);
+EVENT_ATTR_STR_HYBRID(topdown-bad-spec,
+		      td_bad_spec_arl_h,
+		      "event=0x73,umask=0x0;event=0x00,umask=0x81;event=0x73,umask=0x0",
+		      hybrid_big_small_tiny);
+EVENT_ATTR_STR_HYBRID(topdown-fe-bound,
+		      td_fe_bound_arl_h,
+		      "event=0x9c,umask=0x01;event=0x00,umask=0x82;event=0x71,umask=0x0",
+		      hybrid_big_small_tiny);
+EVENT_ATTR_STR_HYBRID(topdown-be-bound,
+		      td_be_bound_arl_h,
+		      "event=0xa4,umask=0x02;event=0x00,umask=0x83;event=0x74,umask=0x0",
+		      hybrid_big_small_tiny);
+
+static struct attribute *arl_h_hybrid_events_attrs[] = {
+	EVENT_PTR(slots_adl),
+	EVENT_PTR(td_retiring_arl_h),
+	EVENT_PTR(td_bad_spec_arl_h),
+	EVENT_PTR(td_fe_bound_arl_h),
+	EVENT_PTR(td_be_bound_arl_h),
+	EVENT_PTR(td_heavy_ops_adl),
+	EVENT_PTR(td_br_mis_adl),
+	EVENT_PTR(td_fetch_lat_adl),
+	EVENT_PTR(td_mem_bound_adl),
+	NULL,
+};
+
 /* Must be in IDX order */
 EVENT_ATTR_STR_HYBRID(mem-loads, mem_ld_adl, "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", hybrid_big_small);
 EVENT_ATTR_STR_HYBRID(mem-stores, mem_st_adl, "event=0xd0,umask=0x6;event=0xcd,umask=0x2", hybrid_big_small);

@@ -5992,6 +6045,21 @@ static struct attribute *mtl_hybrid_mem_attrs[] = {
 	NULL
 };

+EVENT_ATTR_STR_HYBRID(mem-loads,
+		      mem_ld_arl_h,
+		      "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3;event=0xd0,umask=0x5,ldlat=3",
+		      hybrid_big_small_tiny);
+EVENT_ATTR_STR_HYBRID(mem-stores,
+		      mem_st_arl_h,
+		      "event=0xd0,umask=0x6;event=0xcd,umask=0x2;event=0xd0,umask=0x6",
+		      hybrid_big_small_tiny);
+
+static struct attribute *arl_h_hybrid_mem_attrs[] = {
+	EVENT_PTR(mem_ld_arl_h),
+	EVENT_PTR(mem_st_arl_h),
+	NULL,
+};
+
 EVENT_ATTR_STR_HYBRID(tx-start, tx_start_adl, "event=0xc9,umask=0x1", hybrid_big);
 EVENT_ATTR_STR_HYBRID(tx-commit, tx_commit_adl, "event=0xc9,umask=0x2", hybrid_big);
 EVENT_ATTR_STR_HYBRID(tx-abort, tx_abort_adl, "event=0xc9,umask=0x4", hybrid_big);

@@ -6015,8 +6083,8 @@ static struct attribute *adl_hybrid_tsx_attrs[] = {

 FORMAT_ATTR_HYBRID(in_tx,       hybrid_big);
 FORMAT_ATTR_HYBRID(in_tx_cp,    hybrid_big);
-FORMAT_ATTR_HYBRID(offcore_rsp, hybrid_big_small);
-FORMAT_ATTR_HYBRID(ldlat,       hybrid_big_small);
+FORMAT_ATTR_HYBRID(offcore_rsp, hybrid_big_small_tiny);
+FORMAT_ATTR_HYBRID(ldlat,       hybrid_big_small_tiny);
 FORMAT_ATTR_HYBRID(frontend,    hybrid_big);

 #define ADL_HYBRID_RTM_FORMAT_ATTR	\

@@ -6039,7 +6107,7 @@ static struct attribute *adl_hybrid_extra_attr[] = {
 	NULL
 };

-FORMAT_ATTR_HYBRID(snoop_rsp, hybrid_small);
+FORMAT_ATTR_HYBRID(snoop_rsp, hybrid_small_tiny);

 static struct attribute *mtl_hybrid_extra_attr_rtm[] = {
 	ADL_HYBRID_RTM_FORMAT_ATTR,

@@ -7121,6 +7189,37 @@ __init int intel_pmu_init(void)
 		name = "lunarlake_hybrid";
 		break;

+	case INTEL_ARROWLAKE_H:
+		intel_pmu_init_hybrid(hybrid_big_small_tiny);
+
+		x86_pmu.pebs_latency_data = arl_h_latency_data;
+		x86_pmu.get_event_constraints = arl_h_get_event_constraints;
+		x86_pmu.hw_config = arl_h_hw_config;
+
+		td_attr = arl_h_hybrid_events_attrs;
+		mem_attr = arl_h_hybrid_mem_attrs;
+		tsx_attr = adl_hybrid_tsx_attrs;
+		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+			mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;
+
+		/* Initialize big core specific PerfMon capabilities. */
+		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
+		intel_pmu_init_lnc(&pmu->pmu);
+
+		/* Initialize Atom core specific PerfMon capabilities. */
+		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
+		intel_pmu_init_skt(&pmu->pmu);
+
+		/* Initialize Lower Power Atom specific PerfMon capabilities. */
+		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_TINY_IDX];
+		intel_pmu_init_grt(&pmu->pmu);
+		pmu->extra_regs = intel_cmt_extra_regs;
+
+		intel_pmu_pebs_data_source_arl_h();
+
+		pr_cont("ArrowLake-H Hybrid events, ");
+		name = "arrowlake_h_hybrid";
+		break;
+
 	default:
 		switch (x86_pmu.version) {
 		case 1:

diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index fa5ea65de0d0..8afc4ad3cd16 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -177,6 +177,17 @@ void __init intel_pmu_pebs_data_source_mtl(void)
 	__intel_pmu_pebs_data_source_cmt(data_source);
 }

+void __init intel_pmu_pebs_data_source_arl_h(void)
+{
+	u64 *data_source;
+
+	intel_pmu_pebs_data_source_lnl();
+
+	data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_TINY_IDX].pebs_data_source;
+	memcpy(data_source, pebs_data_source, sizeof(pebs_data_source));
+	__intel_pmu_pebs_data_source_cmt(data_source);
+}
+
 void __init intel_pmu_pebs_data_source_cmt(void)
 {
 	__intel_pmu_pebs_data_source_cmt(pebs_data_source);

@@ -388,6 +399,16 @@ u64 lnl_latency_data(struct perf_event *event, u64 status)
 	return lnc_latency_data(event, status);
 }

+u64 arl_h_latency_data(struct perf_event *event, u64 status)
+{
+	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
+
+	if (pmu->pmu_type == hybrid_tiny)
+		return cmt_latency_data(event, status);
+
+	return lnl_latency_data(event, status);
+}
+
 static u64 load_latency_data(struct perf_event *event, u64 status)
 {
 	union intel_x86_pebs_dse dse;

diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 909467d6d7ed..82c6f45ce975 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -1592,6 +1592,8 @@ u64 cmt_latency_data(struct perf_event *event, u64 status);

 u64 lnl_latency_data(struct perf_event *event, u64 status);

+u64 arl_h_latency_data(struct perf_event *event, u64 status);
+
 extern struct event_constraint intel_core2_pebs_event_constraints[];

 extern struct event_constraint intel_atom_pebs_event_constraints[];

@@ -1711,6 +1713,8 @@ void intel_pmu_pebs_data_source_grt(void);

 void intel_pmu_pebs_data_source_mtl(void);

+void intel_pmu_pebs_data_source_arl_h(void);
+
 void intel_pmu_pebs_data_source_cmt(void);

 void intel_pmu_pebs_data_source_lnl(void);

From b302d5a6fff5dd7ddb1e4752d60c0eaa4cc4f7f3 Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Sun, 29 Sep 2024 16:42:30 +0200
Subject: [PATCH 06/32] uprobes: don't abuse get_utask() in pre_ssout() and prepare_uretprobe()

handle_swbp() calls get_utask() before prepare_uretprobe() or
pre_ssout() can be called, so they can simply use current->utask, which
cannot be NULL at that point.
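The call ordering this change relies on, roughly (a simplified sketch of
the control flow described above, not actual kernel code):

	/*
	 * handle_swbp(regs)
	 *     utask = get_utask();      // allocates current->utask on first use
	 *     if (!utask)
	 *             return;           // the only failure point
	 *     ...
	 *     pre_ssout(...);           // current->utask is non-NULL here
	 *     prepare_uretprobe(...);   // likewise (via handler_chain())
	 */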
Signed-off-by: Oleg Nesterov
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20240929144230.GA9468@redhat.com
---
 kernel/events/uprobes.c | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)

diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 5106dc181387..15e91a38d327 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1905,18 +1905,14 @@ static void cleanup_return_instances(struct uprobe_task *utask, bool chained,

 static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
 {
-	struct return_instance *ri;
-	struct uprobe_task *utask;
+	struct uprobe_task *utask = current->utask;
 	unsigned long orig_ret_vaddr, trampoline_vaddr;
+	struct return_instance *ri;
 	bool chained;

 	if (!get_xol_area())
 		return;

-	utask = get_utask();
-	if (!utask)
-		return;
-
 	if (utask->depth >= MAX_URETPROBE_DEPTH) {
 		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
 				" nestedness limit pid/tgid=%d/%d\n",

@@ -1977,14 +1973,10 @@ fail:
 static int
 pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
 {
-	struct uprobe_task *utask;
+	struct uprobe_task *utask = current->utask;
 	unsigned long xol_vaddr;
 	int err;

-	utask = get_utask();
-	if (!utask)
-		return -ENOMEM;
-
 	if (!try_get_uprobe(uprobe))
 		return -EINVAL;

From c7b4133c48445dde789ed30b19ccb0448c7593f7 Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Sun, 29 Sep 2024 16:42:35 +0200
Subject: [PATCH 07/32] uprobes: sanitize xol_free_insn_slot()

1. Clear utask->xol_vaddr unconditionally, even if this addr is not
   valid; xol_free_insn_slot() should never return with
   utask->xol_vaddr != NULL.

2. Add a comment to explain why we need to validate slot_addr.

3. Simplify the validation above. We can simply check
   offset < PAGE_SIZE; unsigned underflows are fine, it should work if
   slot_addr < area->vaddr.

4. Kill the unnecessary "slot_nr >= UINSNS_PER_PAGE" check, slot_nr
   must be valid if offset < PAGE_SIZE.

The next patches will clean up this function even more.

Signed-off-by: Oleg Nesterov
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20240929144235.GA9471@redhat.com
---
 kernel/events/uprobes.c | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)

diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 15e91a38d327..3f38be1e736b 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1680,8 +1680,8 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
 static void xol_free_insn_slot(struct task_struct *tsk)
 {
 	struct xol_area *area;
-	unsigned long vma_end;
 	unsigned long slot_addr;
+	unsigned long offset;

 	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
 		return;

@@ -1690,24 +1690,21 @@ static void xol_free_insn_slot(struct task_struct *tsk)
 	if (unlikely(!slot_addr))
 		return;

+	tsk->utask->xol_vaddr = 0;
 	area = tsk->mm->uprobes_state.xol_area;
-	vma_end = area->vaddr + PAGE_SIZE;
-	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
-		unsigned long offset;
-		int slot_nr;
-
-		offset = slot_addr - area->vaddr;
-		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
-		if (slot_nr >= UINSNS_PER_PAGE)
-			return;
+	offset = slot_addr - area->vaddr;
+	/*
+	 * slot_addr must fit into [area->vaddr, area->vaddr + PAGE_SIZE).
+	 * This check can only fail if the "[uprobes]" vma was mremap'ed.
+	 */
+	if (offset < PAGE_SIZE) {
+		int slot_nr = offset / UPROBE_XOL_SLOT_BYTES;

 		clear_bit(slot_nr, area->bitmap);
 		atomic_dec(&area->slot_count);
 		smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
 		if (waitqueue_active(&area->wq))
 			wake_up(&area->wq);
-
-		tsk->utask->xol_vaddr = 0;
 	}
 }

From 430af825ba991730f8acc3c804a4aef82e9f7ff6 Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Sun, 29 Sep 2024 16:42:39 +0200
Subject: [PATCH 08/32] uprobes: kill the unnecessary put_uprobe/xol_free_insn_slot in uprobe_free_utask()

If pre_ssout() succeeds and sets utask->active_uprobe and
utask->xol_vaddr, the task must not exit until it calls
handle_singlestep(), which does the necessary put_uprobe() and
xol_free_insn_slot().

Remove put_uprobe() and xol_free_insn_slot() from uprobe_free_utask().
With this change xol_free_insn_slot() can't hit
xol_area/utask/xol_vaddr == NULL, so we can kill the unnecessary checks
and simplify this function even more.

Signed-off-by: Oleg Nesterov
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20240929144239.GA9475@redhat.com
---
 kernel/events/uprobes.c | 24 +++++-------------------
 1 file changed, 5 insertions(+), 19 deletions(-)

diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 3f38be1e736b..03035a859a56 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1673,28 +1673,16 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
 }

 /*
- * xol_free_insn_slot - If slot was earlier allocated by
- * @xol_get_insn_slot(), make the slot available for
- * subsequent requests.
+ * xol_free_insn_slot - free the slot allocated by xol_get_insn_slot()
 */
 static void xol_free_insn_slot(struct task_struct *tsk)
 {
-	struct xol_area *area;
-	unsigned long slot_addr;
-	unsigned long offset;
-
-	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
-		return;
-
-	slot_addr = tsk->utask->xol_vaddr;
-	if (unlikely(!slot_addr))
-		return;
+	struct xol_area *area = tsk->mm->uprobes_state.xol_area;
+	unsigned long offset = tsk->utask->xol_vaddr - area->vaddr;

 	tsk->utask->xol_vaddr = 0;
-	area = tsk->mm->uprobes_state.xol_area;
-	offset = slot_addr - area->vaddr;
 	/*
-	 * slot_addr must fit into [area->vaddr, area->vaddr + PAGE_SIZE).
+	 * xol_vaddr must fit into [area->vaddr, area->vaddr + PAGE_SIZE).
 	 * This check can only fail if the "[uprobes]" vma was mremap'ed.
 	 */
 	if (offset < PAGE_SIZE) {

@@ -1764,14 +1752,12 @@ void uprobe_free_utask(struct task_struct *t)
 	if (!utask)
 		return;

-	if (utask->active_uprobe)
-		put_uprobe(utask->active_uprobe);
+	WARN_ON_ONCE(utask->active_uprobe || utask->xol_vaddr);

 	ri = utask->return_instances;
 	while (ri)
 		ri = free_ret_instance(ri);

-	xol_free_insn_slot(t);
 	kfree(utask);
 	t->utask = NULL;
 }

From 6ffe8c7d871b327d16ae6b6f1db4c8ecb0f15c64 Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Sun, 29 Sep 2024 16:42:44 +0200
Subject: [PATCH 09/32] uprobes: simplify xol_take_insn_slot() and its caller

The do / while (slot_nr >= UINSNS_PER_PAGE) loop in xol_take_insn_slot()
makes no sense: the checked condition is always true. Change this code
to use a "for (;;)" loop; this way we do not need to change slot_nr if
test_and_set_bit() fails.

Also, kill the unnecessary xol_vaddr != NULL check in
xol_get_insn_slot(); xol_take_insn_slot() never returns NULL.
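The resulting allocation loop, shown in the diff below, has this shape.
The fragment is a sketch extracted from the patch for orientation only:

	for (;;) {
		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
		if (slot_nr < UINSNS_PER_PAGE) {
			if (!test_and_set_bit(slot_nr, area->bitmap))
				break;		/* won the race for this slot */
			continue;		/* lost the race, rescan the bitmap */
		}
		/* page is full, sleep until a slot is freed */
		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
	}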
Signed-off-by: Oleg Nesterov
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20240929144244.GA9480@redhat.com
---
 kernel/events/uprobes.c | 17 ++++-------------
 1 file changed, 4 insertions(+), 13 deletions(-)

diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 03035a859a56..616b5ff3fd85 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1628,25 +1628,20 @@ void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
 */
 static unsigned long xol_take_insn_slot(struct xol_area *area)
 {
-	unsigned long slot_addr;
-	int slot_nr;
+	unsigned int slot_nr;

-	do {
+	for (;;) {
 		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
 		if (slot_nr < UINSNS_PER_PAGE) {
 			if (!test_and_set_bit(slot_nr, area->bitmap))
 				break;
-
-			slot_nr = UINSNS_PER_PAGE;
 			continue;
 		}
 		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
-	} while (slot_nr >= UINSNS_PER_PAGE);
+	}

-	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
 	atomic_inc(&area->slot_count);
-
-	return slot_addr;
+	return area->vaddr + slot_nr * UPROBE_XOL_SLOT_BYTES;
 }

 /*
@@ -1663,12 +1658,8 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
 		return 0;

 	xol_vaddr = xol_take_insn_slot(area);
-	if (unlikely(!xol_vaddr))
-		return 0;
-
 	arch_uprobe_copy_ixol(area->page, xol_vaddr,
 			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
-
 	return xol_vaddr;
 }

From 1cee988c1d21eabc936d1401811012522083e36f Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Sun, 29 Sep 2024 16:42:48 +0200
Subject: [PATCH 10/32] uprobes: move the initialization of utask->xol_vaddr from pre_ssout() to xol_get_insn_slot()

This simplifies the code and makes xol_get_insn_slot() symmetric with
xol_free_insn_slot(), which clears utask->xol_vaddr.

Signed-off-by: Oleg Nesterov
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20240929144248.GA9483@redhat.com
---
 kernel/events/uprobes.c | 22 ++++++++--------------
 1 file changed, 8 insertions(+), 14 deletions(-)

diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 616b5ff3fd85..dfaca306443d 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1646,21 +1646,19 @@ static unsigned long xol_take_insn_slot(struct xol_area *area)

 /*
  * xol_get_insn_slot - allocate a slot for xol.
- * Returns the allocated slot address or 0.
  */
-static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
+static bool xol_get_insn_slot(struct uprobe *uprobe)
 {
-	struct xol_area *area;
-	unsigned long xol_vaddr;
+	struct uprobe_task *utask = current->utask;
+	struct xol_area *area = get_xol_area();

-	area = get_xol_area();
 	if (!area)
-		return 0;
+		return false;

-	xol_vaddr = xol_take_insn_slot(area);
-	arch_uprobe_copy_ixol(area->page, xol_vaddr,
+	utask->xol_vaddr = xol_take_insn_slot(area);
+	arch_uprobe_copy_ixol(area->page, utask->xol_vaddr,
 			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
-	return xol_vaddr;
+	return true;
 }

 /*
@@ -1948,21 +1946,17 @@ static int
 pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
 {
 	struct uprobe_task *utask = current->utask;
-	unsigned long xol_vaddr;
 	int err;

 	if (!try_get_uprobe(uprobe))
 		return -EINVAL;

-	xol_vaddr = xol_get_insn_slot(uprobe);
-	if (!xol_vaddr) {
+	if (!xol_get_insn_slot(uprobe)) {
 		err = -ENOMEM;
 		goto err_out;
 	}

-	utask->xol_vaddr = xol_vaddr;
 	utask->vaddr = bp_vaddr;
-
 	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
 	if (unlikely(err)) {
 		xol_free_insn_slot(current);
 		goto err_out;
 	}

From c5356ab1db28cafc448a50c26ba84442237abb98 Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Sun, 29 Sep 2024 16:42:53 +0200
Subject: [PATCH 11/32] uprobes: pass utask to xol_get_insn_slot() and xol_free_insn_slot()

Add a "struct uprobe_task *utask" argument to xol_get_insn_slot() and
xol_free_insn_slot(); their callers already have it, so we can avoid
the unnecessary dereference and simplify the code.

Kill the "tsk" argument of xol_free_insn_slot(), it is always current.

Signed-off-by: Oleg Nesterov
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20240929144253.GA9487@redhat.com
---
 kernel/events/uprobes.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index dfaca306443d..c9f1e1e56b15 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1647,9 +1647,8 @@ static unsigned long xol_take_insn_slot(struct xol_area *area)
 /*
  * xol_get_insn_slot - allocate a slot for xol.
  */
-static bool xol_get_insn_slot(struct uprobe *uprobe)
+static bool xol_get_insn_slot(struct uprobe *uprobe, struct uprobe_task *utask)
 {
-	struct uprobe_task *utask = current->utask;
 	struct xol_area *area = get_xol_area();

 	if (!area)
@@ -1664,12 +1663,12 @@ static bool xol_get_insn_slot(struct uprobe *uprobe)
 /*
  * xol_free_insn_slot - free the slot allocated by xol_get_insn_slot()
 */
-static void xol_free_insn_slot(struct task_struct *tsk)
+static void xol_free_insn_slot(struct uprobe_task *utask)
 {
-	struct xol_area *area = tsk->mm->uprobes_state.xol_area;
-	unsigned long offset = tsk->utask->xol_vaddr - area->vaddr;
+	struct xol_area *area = current->mm->uprobes_state.xol_area;
+	unsigned long offset = utask->xol_vaddr - area->vaddr;

-	tsk->utask->xol_vaddr = 0;
+	utask->xol_vaddr = 0;
 	/*
 	 * xol_vaddr must fit into [area->vaddr, area->vaddr + PAGE_SIZE).
 	 * This check can only fail if the "[uprobes]" vma was mremap'ed.
 	 */
@@ -1951,7 +1950,7 @@ pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
 	if (!try_get_uprobe(uprobe))
 		return -EINVAL;

-	if (!xol_get_insn_slot(uprobe)) {
+	if (!xol_get_insn_slot(uprobe, utask)) {
 		err = -ENOMEM;
 		goto err_out;
 	}

@@ -1959,7 +1958,7 @@ pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
 	utask->vaddr = bp_vaddr;
 	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
 	if (unlikely(err)) {
-		xol_free_insn_slot(current);
+		xol_free_insn_slot(utask);
 		goto err_out;
 	}

@@ -2307,7 +2306,7 @@ static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
 	put_uprobe(uprobe);
 	utask->active_uprobe = NULL;
 	utask->state = UTASK_RUNNING;
-	xol_free_insn_slot(current);
+	xol_free_insn_slot(utask);

 	spin_lock_irq(&current->sighand->siglock);
 	recalc_sigpending(); /* see uprobe_deny_signal() */

From c16e2fdd746c78f5b2ce3c2ab8a26a61b6ed09e5 Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Sun, 29 Sep 2024 16:42:58 +0200
Subject: [PATCH 12/32] uprobes: deny mremap(xol_vma)

kernel/events/uprobes.c assumes that xol_area->vaddr is always correct,
but a malicious application can remap its "[uprobes]" vma to another
address to confuse the kernel. Introduce xol_mremap() to make this
impossible.

With this change utask->xol_vaddr in xol_free_insn_slot() can't be
invalid, so we can turn the offset check into
WARN_ON_ONCE(offset >= PAGE_SIZE).

Signed-off-by: Oleg Nesterov
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20240929144258.GA9492@redhat.com
---
 kernel/events/uprobes.c | 28 ++++++++++++++++------------
 1 file changed, 16 insertions(+), 12 deletions(-)

diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index c9f1e1e56b15..d3538b6c0831 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1472,9 +1472,15 @@ static vm_fault_t xol_fault(const struct vm_special_mapping *sm,
 	return 0;
 }

+static int xol_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
+{
+	return -EPERM;
+}
+
 static const struct vm_special_mapping xol_mapping = {
 	.name = "[uprobes]",
 	.fault = xol_fault,
+	.mremap = xol_mremap,
 };

 /* Slot allocation for XOL */

@@ -1667,21 +1673,19 @@ static void xol_free_insn_slot(struct uprobe_task *utask)
 {
 	struct xol_area *area = current->mm->uprobes_state.xol_area;
 	unsigned long offset = utask->xol_vaddr - area->vaddr;
+	unsigned int slot_nr;

 	utask->xol_vaddr = 0;
-	/*
-	 * xol_vaddr must fit into [area->vaddr, area->vaddr + PAGE_SIZE).
-	 * This check can only fail if the "[uprobes]" vma was mremap'ed.
-	 */
-	if (offset < PAGE_SIZE) {
-		int slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
+	/* xol_vaddr must fit into [area->vaddr, area->vaddr + PAGE_SIZE) */
+	if (WARN_ON_ONCE(offset >= PAGE_SIZE))
+		return;

-		clear_bit(slot_nr, area->bitmap);
-		atomic_dec(&area->slot_count);
-		smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
-		if (waitqueue_active(&area->wq))
-			wake_up(&area->wq);
-	}
+	slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
+	clear_bit(slot_nr, area->bitmap);
+	atomic_dec(&area->slot_count);
+	smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
+	if (waitqueue_active(&area->wq))
+		wake_up(&area->wq);
 }

From 7a166094bd2b1c084fd215747f9cd05a853d66c9 Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Tue, 1 Oct 2024 16:24:59 +0200
Subject: [PATCH 13/32] uprobes: kill xol_area->slot_count

Add a new helper, xol_get_slot_nr(), which does find_first_zero_bit() +
test_and_set_bit().
xol_take_insn_slot() can wait for the
"xol_get_slot_nr() < UINSNS_PER_PAGE" event instead of
"area->slot_count < UINSNS_PER_PAGE".

So we can kill area->slot_count and avoid atomic_inc() + atomic_dec();
this simplifies the code and can slightly improve performance.

Signed-off-by: Oleg Nesterov
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Andrii Nakryiko
Link: https://lore.kernel.org/r/20241001142458.GA13629@redhat.com
---
 kernel/events/uprobes.c | 29 +++++++++++++++--------------
 1 file changed, 15 insertions(+), 14 deletions(-)

diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index d3538b6c0831..a1c801e8333c 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -99,7 +99,6 @@ static LIST_HEAD(delayed_uprobe_list);
 */
 struct xol_area {
 	wait_queue_head_t	wq;		/* if all slots are busy */
-	atomic_t		slot_count;	/* number of in-use slots */
 	unsigned long		*bitmap;	/* 0 = free slot */

 	struct page		*page;

@@ -1556,7 +1555,6 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
 	init_waitqueue_head(&area->wq);
 	/* Reserve the 1st slot for get_trampoline_vaddr() */
 	set_bit(0, area->bitmap);
-	atomic_set(&area->slot_count, 1);
 	insns = arch_uprobe_trampoline(&insns_size);
 	arch_uprobe_copy_ixol(area->page, 0, insns, insns_size);

@@ -1629,24 +1627,28 @@ void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
 	}
 }

+static unsigned long xol_get_slot_nr(struct xol_area *area)
+{
+	unsigned long slot_nr;
+
+	slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
+	if (slot_nr < UINSNS_PER_PAGE) {
+		if (!test_and_set_bit(slot_nr, area->bitmap))
+			return slot_nr;
+	}
+
+	return UINSNS_PER_PAGE;
+}
+
 /*
  * - search for a free slot.
  */
 static unsigned long xol_take_insn_slot(struct xol_area *area)
 {
 	unsigned long slot_nr;

-	for (;;) {
-		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
-		if (slot_nr < UINSNS_PER_PAGE) {
-			if (!test_and_set_bit(slot_nr, area->bitmap))
-				break;
-			continue;
-		}
-		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
-	}
+	wait_event(area->wq, (slot_nr = xol_get_slot_nr(area)) < UINSNS_PER_PAGE);

-	atomic_inc(&area->slot_count);
 	return area->vaddr + slot_nr * UPROBE_XOL_SLOT_BYTES;
 }

@@ -1682,7 +1684,6 @@ static void xol_free_insn_slot(struct uprobe_task *utask)

 	slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
 	clear_bit(slot_nr, area->bitmap);
-	atomic_dec(&area->slot_count);
 	smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
 	if (waitqueue_active(&area->wq))
 		wake_up(&area->wq);

From 6c74ca7aa81a23c613b8ca52bfe0a4b3734dd287 Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Tue, 1 Oct 2024 16:25:03 +0200
Subject: [PATCH 14/32] uprobes: fold xol_take_insn_slot() into xol_get_insn_slot()

After the previous change xol_take_insn_slot() becomes trivial, kill it.

Signed-off-by: Oleg Nesterov
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Andrii Nakryiko
Link: https://lore.kernel.org/r/20241001142503.GA13633@redhat.com
---
 kernel/events/uprobes.c | 17 ++++-------------
 1 file changed, 4 insertions(+), 13 deletions(-)

diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index a1c801e8333c..2a0059464383 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1640,29 +1640,20 @@ static unsigned long xol_get_slot_nr(struct xol_area *area)
 	return UINSNS_PER_PAGE;
 }

-/*
- * - search for a free slot.
- */
-static unsigned long xol_take_insn_slot(struct xol_area *area)
-{
-	unsigned long slot_nr;
-
-	wait_event(area->wq, (slot_nr = xol_get_slot_nr(area)) < UINSNS_PER_PAGE);
-
-	return area->vaddr + slot_nr * UPROBE_XOL_SLOT_BYTES;
-}
-
 /*
  * xol_get_insn_slot - allocate a slot for xol.
  */
 static bool xol_get_insn_slot(struct uprobe *uprobe, struct uprobe_task *utask)
 {
 	struct xol_area *area = get_xol_area();
+	unsigned long slot_nr;

 	if (!area)
 		return false;

-	utask->xol_vaddr = xol_take_insn_slot(area);
+	wait_event(area->wq, (slot_nr = xol_get_slot_nr(area)) < UINSNS_PER_PAGE);
+
+	utask->xol_vaddr = area->vaddr + slot_nr * UPROBE_XOL_SLOT_BYTES;
 	arch_uprobe_copy_ixol(area->page, utask->xol_vaddr,
 			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
 	return true;
 }

From de20037e1b3c2f2ca97b8c12b8c7bca8abd509a7 Mon Sep 17 00:00:00 2001
From: Breno Leitao
Date: Tue, 1 Oct 2024 07:10:19 -0700
Subject: [PATCH 15/32] perf/x86/amd: Warn only on new bits set

Warning on every leaked bit can cause a flood of messages, triggering
various stall-warning mechanisms to fire, including CSD locks, which
makes the machine unusable.

Track the bits that are being leaked, and only warn when a new bit is
set.

That said, this patch will help with the following issues:

1) It will tell us which bits are being set, so it is easy to
   communicate it back to the vendor, and to do a root-cause analysis.

2) It avoids making the machine unusable, because, in the worst case
   scenario, the user gets fewer than 60 WARNs (one per unhandled bit).

Suggested-by: Paul E. McKenney
Signed-off-by: Breno Leitao
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Sandipan Das
Reviewed-by: Paul E. McKenney
Link: https://lkml.kernel.org/r/20241001141020.2620361-1-leitao@debian.org
---
 arch/x86/events/amd/core.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 920e3a640cad..b4a1a2576510 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -943,11 +943,12 @@ static int amd_pmu_v2_snapshot_branch_stack(struct perf_branch_entry *entries, u
 static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	static atomic64_t status_warned = ATOMIC64_INIT(0);
+	u64 reserved, status, mask, new_bits, prev_bits;
 	struct perf_sample_data data;
 	struct hw_perf_event *hwc;
 	struct perf_event *event;
 	int handled = 0, idx;
-	u64 reserved, status, mask;
 	bool pmu_enabled;

 	/*
@@ -1012,7 +1013,12 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
 	 * the corresponding PMCs are expected to be inactive according to the
 	 * active_mask
 	 */
-	WARN_ON(status > 0);
+	if (status > 0) {
+		prev_bits = atomic64_fetch_or(status, &status_warned);
+		// A new bit was set for the very first time.
+		new_bits = status & ~prev_bits;
+		WARN(new_bits, "New overflows for inactive PMCs: %llx\n", new_bits);
+	}

 	/* Clear overflow and freeze bits */
 	amd_pmu_ack_global_status(~status);

From da09a9e0c3eab164af950be44ee6bdea8527c3e5 Mon Sep 17 00:00:00 2001
From: Jiri Olsa
Date: Fri, 18 Oct 2024 22:22:51 +0200
Subject: [PATCH 16/32] uprobe: Add data pointer to consumer handlers

Add a data pointer to both entry and exit consumer handlers and to all
their users. The functionality itself comes in a following change.
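A minimal consumer stub against the new prototypes would look like the
sketch below. This is illustrative only (the handler names are made up,
and the data pointer is unused until the next patch):

	static int my_handler(struct uprobe_consumer *self, struct pt_regs *regs,
			      __u64 *data)
	{
		return 0;
	}

	static int my_ret_handler(struct uprobe_consumer *self, unsigned long func,
				  struct pt_regs *regs, __u64 *data)
	{
		return 0;
	}

	static struct uprobe_consumer my_consumer = {
		.handler	= my_handler,
		.ret_handler	= my_ret_handler,
	};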
Signed-off-by: Jiri Olsa
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Oleg Nesterov
Acked-by: Andrii Nakryiko
Link: https://lore.kernel.org/r/20241018202252.693462-2-jolsa@kernel.org
---
 include/linux/uprobes.h                              |  4 ++--
 kernel/events/uprobes.c                              |  4 ++--
 kernel/trace/bpf_trace.c                             |  6 ++++--
 kernel/trace/trace_uprobe.c                          | 12 ++++++++----
 .../testing/selftests/bpf/bpf_testmod/bpf_testmod.c  |  2 +-
 5 files changed, 17 insertions(+), 11 deletions(-)

diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 2b294bf1881f..bb265a632b91 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -37,10 +37,10 @@ struct uprobe_consumer {
 	 * for the current process. If filter() is omitted or returns true,
 	 * UPROBE_HANDLER_REMOVE is effectively ignored.
 	 */
-	int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs);
+	int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs, __u64 *data);
 	int (*ret_handler)(struct uprobe_consumer *self,
 				unsigned long func,
-				struct pt_regs *regs);
+				struct pt_regs *regs, __u64 *data);
 	bool (*filter)(struct uprobe_consumer *self, struct mm_struct *mm);

 	struct list_head cons_node;

diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 2a0059464383..6b44c386a5df 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -2090,7 +2090,7 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
 		int rc = 0;

 		if (uc->handler) {
-			rc = uc->handler(uc, regs);
+			rc = uc->handler(uc, regs, NULL);
 			WARN(rc & ~UPROBE_HANDLER_MASK,
 				"bad rc=0x%x from %ps()\n", rc, uc->handler);
 		}
@@ -2128,7 +2128,7 @@ handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
 	rcu_read_lock_trace();
 	list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
 		if (uc->ret_handler)
-			uc->ret_handler(uc, ri->func, regs);
+			uc->ret_handler(uc, ri->func, regs, NULL);
 	}
 	rcu_read_unlock_trace();
 }

diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index a582cd25ca87..fdab7ecd8dfa 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -3244,7 +3244,8 @@ uprobe_multi_link_filter(struct uprobe_consumer *con, struct mm_struct *mm)
 }

 static int
-uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs)
+uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs,
+			  __u64 *data)
 {
 	struct bpf_uprobe *uprobe;

@@ -3253,7 +3254,8 @@ uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs)
 }

 static int
-uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs)
+uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs,
+			      __u64 *data)
 {
 	struct bpf_uprobe *uprobe;

diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index c40531d2cbad..5895eabe3581 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -89,9 +89,11 @@ static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
 static int register_uprobe_event(struct trace_uprobe *tu);
 static int unregister_uprobe_event(struct trace_uprobe *tu);

-static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
+static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs,
+			     __u64 *data);
 static int uretprobe_dispatcher(struct uprobe_consumer *con,
-				unsigned long func, struct pt_regs *regs);
+				unsigned long func, struct pt_regs *regs,
+				__u64 *data);

 #ifdef CONFIG_STACK_GROWSUP
 static unsigned long
 adjust_stack_addr(unsigned long addr, unsigned int n)

@@ -1517,7 +1519,8 @@ trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
 	}
 }

-static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
+static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs,
+			     __u64 *data)
 {
 	struct trace_uprobe *tu;
 	struct uprobe_dispatch_data udd;

@@ -1548,7 +1551,8 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
 }

 static int uretprobe_dispatcher(struct uprobe_consumer *con,
-				unsigned long func, struct pt_regs *regs)
+				unsigned long func, struct pt_regs *regs,
+				__u64 *data)
 {
 	struct trace_uprobe *tu;
 	struct uprobe_dispatch_data udd;

diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index 8835761d9a12..12005e3dc3e4 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -461,7 +461,7 @@ static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {

 static int
 uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func,
-		   struct pt_regs *regs)
+		   struct pt_regs *regs, __u64 *data)
 {
 	regs->ax = 0x12345678deadbeef;

From 4d756095d3994cb41393817dc696b458938a6bd0 Mon Sep 17 00:00:00 2001
From: Jiri Olsa
Date: Fri, 18 Oct 2024 22:22:52 +0200
Subject: [PATCH 17/32] uprobe: Add support for session consumer

This change allows the uprobe consumer to behave as a session, which
means that the 'handler' and 'ret_handler' callbacks are connected in a
way that allows to:

  - control execution of 'ret_handler' from the 'handler' callback
  - share data between 'handler' and 'ret_handler' callbacks

The session concept fits our common use case, where we do filtering in
the entry uprobe and based on the result we decide whether to run the
return uprobe. It's also convenient to share data between the session
callbacks.

To achieve this we are adding a new return value the uprobe consumer
can return from the 'handler' callback:

  UPROBE_HANDLER_IGNORE
  - Ignore the 'ret_handler' callback for this consumer.

And we store the cookie and pass it to 'ret_handler' when the consumer
has both 'handler' and 'ret_handler' callbacks defined.

We store the shared data in the return_consumer object array that is
part of the return_instance object. This way handle_uretprobe_chain()
can find the related return_consumer and its shared data.

We also store the entry handler's return value, for cases when there
are multiple consumers on a single uprobe and some of them are ignored
and some of them are not, in which case the return probe gets installed
and we need a way to find out which consumer needs to be ignored.

The tricky part is when a consumer is registered 'after' the uprobe
entry handler is hit. In such a case this consumer's 'ret_handler' gets
executed as well, but it won't have the proper data pointer set, so we
can filter it out.
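To make the semantics concrete, a hypothetical session consumer could look
like this. Sketch only: is_interesting() stands in for any entry-side
filter, and the latency measurement is just one possible use of the cookie:

	static int entry_handler(struct uprobe_consumer *self, struct pt_regs *regs,
				 __u64 *data)
	{
		if (!is_interesting(regs))		/* hypothetical filter */
			return UPROBE_HANDLER_IGNORE;	/* skip ret_handler this time */

		*data = ktime_get_ns();			/* cookie shared with ret_handler */
		return 0;
	}

	static int ret_handler(struct uprobe_consumer *self, unsigned long func,
			       struct pt_regs *regs, __u64 *data)
	{
		u64 spent = ktime_get_ns() - *data;	/* cookie stored at entry */

		pr_debug("func %lx took %llu ns\n", func, spent);
		return 0;
	}

	static struct uprobe_consumer session_consumer = {
		.handler	= entry_handler,
		.ret_handler	= ret_handler,	/* both set => session semantics */
	};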
Suggested-by: Oleg Nesterov
Signed-off-by: Jiri Olsa
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Oleg Nesterov
Acked-by: Andrii Nakryiko
Link: https://lore.kernel.org/r/20241018202252.693462-3-jolsa@kernel.org
---
 include/linux/uprobes.h |  21 +++++-
 kernel/events/uprobes.c | 148 ++++++++++++++++++++++++++++++++--------
 2 files changed, 139 insertions(+), 30 deletions(-)

diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index bb265a632b91..dbaf04189548 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -23,8 +23,17 @@ struct inode;
 struct notifier_block;
 struct page;

+/*
+ * Allowed return values from uprobe consumer's handler callback
+ * with following meaning:
+ *
+ * UPROBE_HANDLER_REMOVE
+ * - Remove the uprobe breakpoint from current->mm.
+ * UPROBE_HANDLER_IGNORE
+ * - Ignore ret_handler callback for this consumer.
+ */
 #define UPROBE_HANDLER_REMOVE		1
-#define UPROBE_HANDLER_MASK		1
+#define UPROBE_HANDLER_IGNORE		2

 #define MAX_URETPROBE_DEPTH		64

@@ -44,6 +53,8 @@ struct uprobe_consumer {
 	bool (*filter)(struct uprobe_consumer *self, struct mm_struct *mm);

 	struct list_head cons_node;
+
+	__u64 id;	/* set when uprobe_consumer is registered */
 };

 #ifdef CONFIG_UPROBES
@@ -83,14 +94,22 @@ struct uprobe_task {
 	unsigned int		depth;
 };

+struct return_consumer {
+	__u64	cookie;
+	__u64	id;
+};
+
 struct return_instance {
 	struct uprobe		*uprobe;
 	unsigned long		func;
 	unsigned long		stack;		/* stack pointer */
 	unsigned long		orig_ret_vaddr; /* original return address */
 	bool			chained;	/* true, if instance is nested */
+	int			consumers_cnt;

 	struct return_instance	*next;		/* keep as stack */
+
+	struct return_consumer	consumers[] __counted_by(consumers_cnt);
 };

 enum rp_check {

diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 6b44c386a5df..4ef4b51776eb 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -64,7 +64,7 @@ struct uprobe {
 	struct rcu_head		rcu;
 	loff_t			offset;
 	loff_t			ref_ctr_offset;
-	unsigned long		flags;
+	unsigned long		flags;	/* "unsigned long" so bitops work */

 	/*
 	 * The generic code assumes that it has two members of unknown type

@@ -823,8 +823,11 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,

 static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
 {
+	static atomic64_t id;
+
 	down_write(&uprobe->consumer_rwsem);
 	list_add_rcu(&uc->cons_node, &uprobe->consumers);
+	uc->id = (__u64) atomic64_inc_return(&id);
 	up_write(&uprobe->consumer_rwsem);
 }

@@ -1761,6 +1764,34 @@ static struct uprobe_task *get_utask(void)
 	return current->utask;
 }

+static size_t ri_size(int consumers_cnt)
+{
+	struct return_instance *ri;
+
+	return sizeof(*ri) + sizeof(ri->consumers[0]) * consumers_cnt;
+}
+
+#define DEF_CNT 4
+
+static struct return_instance *alloc_return_instance(void)
+{
+	struct return_instance *ri;
+
+	ri = kzalloc(ri_size(DEF_CNT), GFP_KERNEL);
+	if (!ri)
+		return ZERO_SIZE_PTR;
+
+	ri->consumers_cnt = DEF_CNT;
+	return ri;
+}
+
+static struct return_instance *dup_return_instance(struct return_instance *old)
+{
+	size_t size = ri_size(old->consumers_cnt);
+
+	return kmemdup(old, size, GFP_KERNEL);
+}
+
 static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
 {
 	struct uprobe_task *n_utask;

@@ -1773,11 +1804,10 @@ static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)

 	p = &n_utask->return_instances;
 	for (o = o_utask->return_instances; o; o = o->next) {
-		n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
+		n = dup_return_instance(o);
 		if (!n)
 			return -ENOMEM;
-ENOMEM; - *n = *o; /* * uprobe's refcnt has to be positive at this point, kept by * utask->return_instances items; return_instances can't be @@ -1870,35 +1900,31 @@ static void cleanup_return_instances(struct uprobe_task *utask, bool chained, utask->return_instances = ri; } -static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs) +static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs, + struct return_instance *ri) { struct uprobe_task *utask = current->utask; unsigned long orig_ret_vaddr, trampoline_vaddr; - struct return_instance *ri; bool chained; if (!get_xol_area()) - return; + goto free; if (utask->depth >= MAX_URETPROBE_DEPTH) { printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to" " nestedness limit pid/tgid=%d/%d\n", current->pid, current->tgid); - return; + goto free; } /* we need to bump refcount to store uprobe in utask */ if (!try_get_uprobe(uprobe)) - return; - - ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL); - if (!ri) - goto fail; + goto free; trampoline_vaddr = uprobe_get_trampoline_vaddr(); orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs); if (orig_ret_vaddr == -1) - goto fail; + goto put; /* drop the entries invalidated by longjmp() */ chained = (orig_ret_vaddr == trampoline_vaddr); @@ -1916,7 +1942,7 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs) * attack from user-space. */ uprobe_warn(current, "handle tail call"); - goto fail; + goto put; } orig_ret_vaddr = utask->return_instances->orig_ret_vaddr; } @@ -1931,9 +1957,10 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs) utask->return_instances = ri; return; -fail: - kfree(ri); +put: put_uprobe(uprobe); +free: + kfree(ri); } /* Prepare to single-step probed instruction out of line. 
*/ @@ -2077,34 +2104,90 @@ static struct uprobe *find_active_uprobe_rcu(unsigned long bp_vaddr, int *is_swb return uprobe; } +static struct return_instance* +push_consumer(struct return_instance *ri, int idx, __u64 id, __u64 cookie) +{ + if (unlikely(ri == ZERO_SIZE_PTR)) + return ri; + + if (unlikely(idx >= ri->consumers_cnt)) { + struct return_instance *old_ri = ri; + + ri->consumers_cnt += DEF_CNT; + ri = krealloc(old_ri, ri_size(old_ri->consumers_cnt), GFP_KERNEL); + if (!ri) { + kfree(old_ri); + return ZERO_SIZE_PTR; + } + } + + ri->consumers[idx].id = id; + ri->consumers[idx].cookie = cookie; + return ri; +} + +static struct return_consumer * +return_consumer_find(struct return_instance *ri, int *iter, int id) +{ + struct return_consumer *ric; + int idx = *iter; + + for (ric = &ri->consumers[idx]; idx < ri->consumers_cnt; idx++, ric++) { + if (ric->id == id) { + *iter = idx + 1; + return ric; + } + } + return NULL; +} + +static bool ignore_ret_handler(int rc) +{ + return rc == UPROBE_HANDLER_REMOVE || rc == UPROBE_HANDLER_IGNORE; +} + static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) { struct uprobe_consumer *uc; - int remove = UPROBE_HANDLER_REMOVE; - bool need_prep = false; /* prepare return uprobe, when needed */ - bool has_consumers = false; + bool has_consumers = false, remove = true; + struct return_instance *ri = NULL; + int push_idx = 0; current->utask->auprobe = &uprobe->arch; list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) { + bool session = uc->handler && uc->ret_handler; + __u64 cookie = 0; int rc = 0; if (uc->handler) { - rc = uc->handler(uc, regs, NULL); - WARN(rc & ~UPROBE_HANDLER_MASK, + rc = uc->handler(uc, regs, &cookie); + WARN(rc < 0 || rc > 2, "bad rc=0x%x from %ps()\n", rc, uc->handler); } - if (uc->ret_handler) - need_prep = true; - - remove &= rc; + remove &= rc == UPROBE_HANDLER_REMOVE; has_consumers = true; + + if (!uc->ret_handler || ignore_ret_handler(rc)) + continue; + + if (!ri) + ri = alloc_return_instance(); + + if (session) + ri = push_consumer(ri, push_idx++, uc->id, cookie); } current->utask->auprobe = NULL; - if (need_prep && !remove) - prepare_uretprobe(uprobe, regs); /* put bp at return */ + if (!ZERO_OR_NULL_PTR(ri)) { + /* + * The push_idx value has the final number of return consumers, + * and ri->consumers_cnt has number of allocated consumers. + */ + ri->consumers_cnt = push_idx; + prepare_uretprobe(uprobe, regs, ri); + } if (remove && has_consumers) { down_read(&uprobe->register_rwsem); @@ -2123,12 +2206,19 @@ static void handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs) { struct uprobe *uprobe = ri->uprobe; + struct return_consumer *ric; struct uprobe_consumer *uc; + int ric_idx = 0; rcu_read_lock_trace(); list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) { - if (uc->ret_handler) - uc->ret_handler(uc, ri->func, regs, NULL); + bool session = uc->handler && uc->ret_handler; + + if (uc->ret_handler) { + ric = return_consumer_find(ri, &ric_idx, uc->id); + if (!session || ric) + uc->ret_handler(uc, ri->func, regs, ric ? &ric->cookie : NULL); + } } rcu_read_unlock_trace(); } From 9b99d65c0bb4e37013bc2ec9c32b78c5751ff952 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 10 Oct 2024 07:26:03 -0700 Subject: [PATCH 18/32] perf/x86/rapl: Move the pmu allocation out of CPU hotplug There are extra codes in the CPU hotplug function to allocate rapl pmus. The generic PMU hotplug support is hard to be applied. 
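Stepping back to the session-consumer API added by the uprobes patch above: the following minimal sketch shows how the pieces fit together. It is illustrative only; the callback signatures, return values and cookie flow follow that diff, while the consumer itself (my_entry, my_ret, the sampling logic) is made up.

#include <linux/uprobes.h>
#include <linux/ktime.h>
#include <linux/atomic.h>
#include <linux/printk.h>

static atomic_t hits;

/* Setting both handler and ret_handler makes this a "session"
 * consumer: the cookie written on entry is handed back to the
 * return handler for the same function instance. */
static int my_entry(struct uprobe_consumer *uc, struct pt_regs *regs,
		    __u64 *cookie)
{
	if (atomic_inc_return(&hits) & 1)
		return UPROBE_HANDLER_IGNORE;	/* skip my_ret for this hit */

	*cookie = ktime_get_ns();	/* stashed in the return_instance */
	return 0;
}

static int my_ret(struct uprobe_consumer *uc, unsigned long func,
		  struct pt_regs *regs, __u64 *cookie)
{
	/* for a session consumer, cookie is the value stored on entry */
	pr_debug("func 0x%lx took %llu ns\n", func,
		 ktime_get_ns() - *cookie);
	return 0;
}

static struct uprobe_consumer my_consumer = {
	.handler	= my_entry,
	.ret_handler	= my_ret,
};

Once such a consumer is registered, a handler returning UPROBE_HANDLER_IGNORE suppresses only its own ret_handler invocation for that hit, while UPROBE_HANDLER_REMOVE likewise suppresses the ret_handler and, if every consumer agrees, unhooks the breakpoint - matching ignore_ret_handler() in the diff above.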
As long as the rapl pmus can be allocated upfront for each die/socket, the code doesn't need to be implemented in the CPU hotplug function. Move the code into init_rapl_pmus(), and allocate a PMU for each possible die/socket. Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Tested-by: Oliver Sang Link: https://lore.kernel.org/r/20241010142604.770192-1-kan.liang@linux.intel.com --- arch/x86/events/rapl.c | 44 ++++++++++++++++++++++++++++-------------- 1 file changed, 30 insertions(+), 14 deletions(-) diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c index a481a939862e..86cbee1a8bf0 100644 --- a/arch/x86/events/rapl.c +++ b/arch/x86/events/rapl.c @@ -602,19 +602,8 @@ static int rapl_cpu_online(unsigned int cpu) struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); int target; - if (!pmu) { - pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu)); - if (!pmu) - return -ENOMEM; - - raw_spin_lock_init(&pmu->lock); - INIT_LIST_HEAD(&pmu->active_list); - pmu->pmu = &rapl_pmus->pmu; - pmu->timer_interval = ms_to_ktime(rapl_timer_ms); - rapl_hrtimer_init(pmu); - - rapl_pmus->pmus[rapl_pmu_idx] = pmu; - } + if (!pmu) + return -ENOMEM; /* * Check if there is an online cpu in the package which collects rapl * events already. @@ -707,6 +696,32 @@ static const struct attribute_group *rapl_attr_update[] = { NULL, }; +static int __init init_rapl_pmu(void) +{ + struct rapl_pmu *pmu; + int idx; + + for (idx = 0; idx < rapl_pmus->nr_rapl_pmu; idx++) { + pmu = kzalloc(sizeof(*pmu), GFP_KERNEL); + if (!pmu) + goto free; + + raw_spin_lock_init(&pmu->lock); + INIT_LIST_HEAD(&pmu->active_list); + pmu->pmu = &rapl_pmus->pmu; + pmu->timer_interval = ms_to_ktime(rapl_timer_ms); + rapl_hrtimer_init(pmu); + + rapl_pmus->pmus[idx] = pmu; + } + + return 0; +free: + for (; idx > 0; idx--) + kfree(rapl_pmus->pmus[idx - 1]); + return -ENOMEM; +} + static int __init init_rapl_pmus(void) { int nr_rapl_pmu = topology_max_packages(); @@ -730,7 +745,8 @@ static int __init init_rapl_pmus(void) rapl_pmus->pmu.read = rapl_pmu_event_read; rapl_pmus->pmu.module = THIS_MODULE; rapl_pmus->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE; - return 0; + + return init_rapl_pmu(); } static struct rapl_model model_snb = { From 9e9af8bbb5f9b565b9faf691f96f661791e199b0 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 10 Oct 2024 07:26:04 -0700 Subject: [PATCH 19/32] perf/x86/rapl: Clean up cpumask and hotplug The rapl PMU is die scoped, which is supported by the generic perf_event subsystem now. Set the scope for the rapl PMU and remove all the cpumask and hotplug code.
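Taken together with the previous patch, the end state is easy to summarize. The sketch below condenses the two diffs (same field and helper names as in the diffs, the other pmu callback assignments elided) to show the resulting shape: size and allocate all per-die PMU structs at init time, declare the PMU's topology scope, and let perf core pick and migrate the reader CPU on hotplug.

/* Condensed sketch of the combined init path; not the full code. */
static int __init init_rapl_pmus(void)
{
	int nr_rapl_pmu = topology_max_packages();
	int rapl_pmu_scope = PERF_PMU_SCOPE_PKG;

	if (!rapl_pmu_is_pkg_scope()) {
		nr_rapl_pmu *= topology_max_dies_per_package();
		rapl_pmu_scope = PERF_PMU_SCOPE_DIE;
	}

	rapl_pmus = kzalloc(struct_size(rapl_pmus, pmus, nr_rapl_pmu),
			    GFP_KERNEL);
	if (!rapl_pmus)
		return -ENOMEM;

	rapl_pmus->pmu.scope = rapl_pmu_scope;	/* core handles hotplug */
	return init_rapl_pmu();			/* upfront per-die allocation */
}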
Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Tested-by: Oliver Sang Tested-by: Dhananjay Ugwekar Link: https://lore.kernel.org/r/20241010142604.770192-2-kan.liang@linux.intel.com --- arch/x86/events/rapl.c | 90 +++----------------------------------- include/linux/cpuhotplug.h | 1 - 2 files changed, 6 insertions(+), 85 deletions(-) diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c index 86cbee1a8bf0..a8defc813c36 100644 --- a/arch/x86/events/rapl.c +++ b/arch/x86/events/rapl.c @@ -148,7 +148,6 @@ struct rapl_model { /* 1/2^hw_unit Joule */ static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly; static struct rapl_pmus *rapl_pmus; -static cpumask_t rapl_cpu_mask; static unsigned int rapl_cntr_mask; static u64 rapl_timer_ms; static struct perf_msr *rapl_msrs; @@ -369,8 +368,6 @@ static int rapl_pmu_event_init(struct perf_event *event) if (event->cpu < 0) return -EINVAL; - event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG; - if (!cfg || cfg >= NR_RAPL_DOMAINS + 1) return -EINVAL; @@ -389,7 +386,6 @@ static int rapl_pmu_event_init(struct perf_event *event) pmu = cpu_to_rapl_pmu(event->cpu); if (!pmu) return -EINVAL; - event->cpu = pmu->cpu; event->pmu_private = pmu; event->hw.event_base = rapl_msrs[bit].msr; event->hw.config = cfg; @@ -403,23 +399,6 @@ static void rapl_pmu_event_read(struct perf_event *event) rapl_event_update(event); } -static ssize_t rapl_get_attr_cpumask(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return cpumap_print_to_pagebuf(true, buf, &rapl_cpu_mask); -} - -static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL); - -static struct attribute *rapl_pmu_attrs[] = { - &dev_attr_cpumask.attr, - NULL, -}; - -static struct attribute_group rapl_pmu_attr_group = { - .attrs = rapl_pmu_attrs, -}; - RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01"); RAPL_EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02"); RAPL_EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03"); @@ -467,7 +446,6 @@ static struct attribute_group rapl_pmu_format_group = { }; static const struct attribute_group *rapl_attr_groups[] = { - &rapl_pmu_attr_group, &rapl_pmu_format_group, &rapl_pmu_events_group, NULL, @@ -570,54 +548,6 @@ static struct perf_msr amd_rapl_msrs[] = { [PERF_RAPL_PSYS] = { 0, &rapl_events_psys_group, NULL, false, 0 }, }; -static int rapl_cpu_offline(unsigned int cpu) -{ - struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); - int target; - - /* Check if exiting cpu is used for collecting rapl events */ - if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask)) - return 0; - - pmu->cpu = -1; - /* Find a new cpu to collect rapl events */ - target = cpumask_any_but(get_rapl_pmu_cpumask(cpu), cpu); - - /* Migrate rapl events to the new target */ - if (target < nr_cpu_ids) { - cpumask_set_cpu(target, &rapl_cpu_mask); - pmu->cpu = target; - perf_pmu_migrate_context(pmu->pmu, cpu, target); - } - return 0; -} - -static int rapl_cpu_online(unsigned int cpu) -{ - s32 rapl_pmu_idx = get_rapl_pmu_idx(cpu); - if (rapl_pmu_idx < 0) { - pr_err("topology_logical_(package/die)_id() returned a negative value"); - return -EINVAL; - } - struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); - int target; - - if (!pmu) - return -ENOMEM; - - /* - * Check if there is an online cpu in the package which collects rapl - * events already. 
- */ - target = cpumask_any_and(&rapl_cpu_mask, get_rapl_pmu_cpumask(cpu)); - if (target < nr_cpu_ids) - return 0; - - cpumask_set_cpu(cpu, &rapl_cpu_mask); - pmu->cpu = cpu; - return 0; -} - static int rapl_check_hw_unit(struct rapl_model *rm) { u64 msr_rapl_power_unit_bits; @@ -725,9 +655,12 @@ free: static int __init init_rapl_pmus(void) { int nr_rapl_pmu = topology_max_packages(); + int rapl_pmu_scope = PERF_PMU_SCOPE_PKG; - if (!rapl_pmu_is_pkg_scope()) + if (!rapl_pmu_is_pkg_scope()) { nr_rapl_pmu *= topology_max_dies_per_package(); + rapl_pmu_scope = PERF_PMU_SCOPE_DIE; + } rapl_pmus = kzalloc(struct_size(rapl_pmus, pmus, nr_rapl_pmu), GFP_KERNEL); if (!rapl_pmus) @@ -743,6 +676,7 @@ static int __init init_rapl_pmus(void) rapl_pmus->pmu.start = rapl_pmu_event_start; rapl_pmus->pmu.stop = rapl_pmu_event_stop; rapl_pmus->pmu.read = rapl_pmu_event_read; + rapl_pmus->pmu.scope = rapl_pmu_scope; rapl_pmus->pmu.module = THIS_MODULE; rapl_pmus->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE; @@ -892,24 +826,13 @@ static int __init rapl_pmu_init(void) if (ret) return ret; - /* - * Install callbacks. Core will call them for each online cpu. - */ - ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE, - "perf/x86/rapl:online", - rapl_cpu_online, rapl_cpu_offline); - if (ret) - goto out; - ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1); if (ret) - goto out1; + goto out; rapl_advertise(); return 0; -out1: - cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE); out: pr_warn("Initialization failed (%d), disabled\n", ret); cleanup_rapl_pmus(); @@ -919,7 +842,6 @@ module_init(rapl_pmu_init); static void __exit intel_rapl_exit(void) { - cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE); perf_pmu_unregister(&rapl_pmus->pmu); cleanup_rapl_pmus(); } diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 2361ed4d2b15..37a9afffb59e 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -208,7 +208,6 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_UNCORE_ONLINE, CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE, CPUHP_AP_PERF_X86_AMD_POWER_ONLINE, - CPUHP_AP_PERF_X86_RAPL_ONLINE, CPUHP_AP_PERF_S390_CF_ONLINE, CPUHP_AP_PERF_S390_SF_ONLINE, CPUHP_AP_PERF_ARM_CCI_ONLINE, From 2bf8e5aceff899f5117f14c73e869a61c44d8a69 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 23 Oct 2024 21:41:58 -0700 Subject: [PATCH 20/32] uprobes: allow put_uprobe() from non-sleepable softirq context Currently put_uprobe() might trigger mutex_lock()/mutex_unlock(), which makes it unsuitable to be called from a more restricted context, such as softirq. Let's make put_uprobe() agnostic to the context in which it is called, and use a work queue to defer the mutex-protected cleanup steps. The RB tree removal step is also moved into the work-deferred callback to avoid a potential deadlock between the softirq-based timer callback added in the next patch and the rest of the uprobe code. We can rework the locking altogether as a follow-up, but that's significantly more tricky, so it warrants its own patch set. For now, we need to make sure that the changes in the next patch, which add the timer, work correctly with the existing approach, while concentrating on the SRCU + timeout logic.
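The pattern itself is generic and worth spelling out: when the final reference can be dropped from a context that cannot sleep, the put path does nothing but the refcount decrement, and all cleanup that needs to sleep is punted to a work item. A self-contained sketch with made-up names (not the uprobe code):

#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

struct obj {
	refcount_t ref;
	struct work_struct work;
	struct mutex *big_lock;		/* cleanup needs to take this */
};

static void obj_free_deferred(struct work_struct *work)
{
	struct obj *o = container_of(work, struct obj, work);

	mutex_lock(o->big_lock);	/* safe: always process context */
	/* unlink o from shared structures here */
	mutex_unlock(o->big_lock);
	kfree(o);
}

/* May be called from softirq/timer context. */
static void obj_put(struct obj *o)
{
	if (!refcount_dec_and_test(&o->ref))
		return;
	INIT_WORK(&o->work, obj_free_deferred);
	schedule_work(&o->work);
}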
Signed-off-by: Andrii Nakryiko Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20241024044159.3156646-2-andrii@kernel.org --- kernel/events/uprobes.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 4ef4b51776eb..d7e489246608 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -27,6 +27,7 @@ #include #include #include +#include #include @@ -61,7 +62,10 @@ struct uprobe { struct list_head pending_list; struct list_head consumers; struct inode *inode; /* Also hold a ref to inode */ - struct rcu_head rcu; + union { + struct rcu_head rcu; + struct work_struct work; + }; loff_t offset; loff_t ref_ctr_offset; unsigned long flags; /* "unsigned long" so bitops work */ @@ -625,10 +629,9 @@ static void uprobe_free_rcu(struct rcu_head *rcu) kfree(uprobe); } -static void put_uprobe(struct uprobe *uprobe) +static void uprobe_free_deferred(struct work_struct *work) { - if (!refcount_dec_and_test(&uprobe->ref)) - return; + struct uprobe *uprobe = container_of(work, struct uprobe, work); write_lock(&uprobes_treelock); @@ -652,6 +655,15 @@ static void put_uprobe(struct uprobe *uprobe) call_rcu_tasks_trace(&uprobe->rcu, uprobe_free_rcu); } +static void put_uprobe(struct uprobe *uprobe) +{ + if (!refcount_dec_and_test(&uprobe->ref)) + return; + + INIT_WORK(&uprobe->work, uprobe_free_deferred); + schedule_work(&uprobe->work); +} + static __always_inline int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset, const struct uprobe *r) From dd1a7567784e2b1f80258be04f57bcfa82c997eb Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 23 Oct 2024 21:41:59 -0700 Subject: [PATCH 21/32] uprobes: SRCU-protect uretprobe lifetime (with timeout) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Avoid taking refcount on uprobe in prepare_uretprobe(), instead take uretprobe-specific SRCU lock and keep it active as kernel transfers control back to user space. Given we can't rely on user space returning from traced function within reasonable time period, we need to make sure not to keep SRCU lock active for too long, though. To that effect, we employ a timer callback which is meant to terminate SRCU lock region after predefined timeout (currently set to 100ms), and instead transfer underlying struct uprobe's lifetime protection to refcounting. This fallback to less scalable refcounting after 100ms is a fine tradeoff from uretprobe's scalability and performance perspective, because uretprobing *long running* user functions inherently doesn't run into scalability issues (there is just not enough frequency to cause noticeable issues with either performance or scalability). The overall trick is in ensuring synchronization between current thread and timer's callback fired on some other thread. To cope with that with minimal logic complications, we add hprobe wrapper which is used to contain all the synchronization related issues behind a small number of basic helpers: hprobe_expire() for "downgrading" uprobe from SRCU-protected state to refcounted state, and a hprobe_consume() and hprobe_finalize() pair of single-use consuming helpers. Other than that, whatever current thread's logic is there stays the same, as timer thread cannot modify return_instance state (or add new/remove old return_instances). It only takes care of SRCU unlock and uprobe refcounting, which is hidden from the higher-level uretprobe handling logic. 
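Reduced to its essence, the synchronization is a single-word state machine. The sketch below (illustrative names, not the kernel code) shows how one xchg()/try_cmpxchg() on the state decides whether the consuming context or the timer ends up doing the SRCU unlock:

#include <linux/srcu.h>
#include <linux/atomic.h>

enum lease_state { LEASED, STABLE, GONE, CONSUMED };

struct lease {
	enum lease_state state;
	int srcu_idx;	/* from __srcu_read_lock() when LEASED was set */
	void *obj;
};

DEFINE_STATIC_SRCU(lease_srcu);

/* Consuming context: runs at most once per lease. */
static void *lease_consume(struct lease *l, enum lease_state *old)
{
	*old = xchg(&l->state, CONSUMED);
	/* caller later unlocks SRCU iff *old == LEASED */
	return (*old == LEASED || *old == STABLE) ? l->obj : NULL;
}

/* Timer: downgrade LEASED to STABLE (got a reference) or GONE.
 * Only the winner of the state change performs the SRCU unlock. */
static void lease_expire(struct lease *l, bool got_ref)
{
	enum lease_state old = LEASED;

	if (try_cmpxchg(&l->state, &old, got_ref ? STABLE : GONE))
		__srcu_read_unlock(&lease_srcu, l->srcu_idx);
}

Losing the race is fine for either side: the old state word tells the loser exactly what the winner already did.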
We use atomic xchg() in hprobe_consume(), which is called from performance critical handle_uretprobe_chain() function run in the current context. When uncontended, this xchg() doesn't seem to hurt performance as there are no other competing CPUs fighting for the same cache line. We also mark struct return_instance as ____cacheline_aligned to ensure no false sharing can happen. Another technical moment. We need to make sure that the list of return instances can be safely traversed under RCU from timer callback, so we delay return_instance freeing with kfree_rcu() and make sure that list modifications use RCU-aware operations. Also, given SRCU lock survives transition from kernel to user space and back we need to use lower-level __srcu_read_lock() and __srcu_read_unlock() to avoid lockdep complaining. Just to give an impression of a kind of performance improvements this change brings, below are benchmarking results with and without these SRCU changes, assuming other uprobe optimizations (mainly RCU Tasks Trace for entry uprobes, lockless RB-tree lookup, and lockless VMA to uprobe lookup) are left intact: WITHOUT SRCU for uretprobes =========================== uretprobe-nop ( 1 cpus): 2.197 ± 0.002M/s ( 2.197M/s/cpu) uretprobe-nop ( 2 cpus): 3.325 ± 0.001M/s ( 1.662M/s/cpu) uretprobe-nop ( 3 cpus): 4.129 ± 0.002M/s ( 1.376M/s/cpu) uretprobe-nop ( 4 cpus): 6.180 ± 0.003M/s ( 1.545M/s/cpu) uretprobe-nop ( 8 cpus): 7.323 ± 0.005M/s ( 0.915M/s/cpu) uretprobe-nop (16 cpus): 6.943 ± 0.005M/s ( 0.434M/s/cpu) uretprobe-nop (32 cpus): 5.931 ± 0.014M/s ( 0.185M/s/cpu) uretprobe-nop (64 cpus): 5.145 ± 0.003M/s ( 0.080M/s/cpu) uretprobe-nop (80 cpus): 4.925 ± 0.005M/s ( 0.062M/s/cpu) WITH SRCU for uretprobes ======================== uretprobe-nop ( 1 cpus): 1.968 ± 0.001M/s ( 1.968M/s/cpu) uretprobe-nop ( 2 cpus): 3.739 ± 0.003M/s ( 1.869M/s/cpu) uretprobe-nop ( 3 cpus): 5.616 ± 0.003M/s ( 1.872M/s/cpu) uretprobe-nop ( 4 cpus): 7.286 ± 0.002M/s ( 1.822M/s/cpu) uretprobe-nop ( 8 cpus): 13.657 ± 0.007M/s ( 1.707M/s/cpu) uretprobe-nop (32 cpus): 45.305 ± 0.066M/s ( 1.416M/s/cpu) uretprobe-nop (64 cpus): 42.390 ± 0.922M/s ( 0.662M/s/cpu) uretprobe-nop (80 cpus): 47.554 ± 2.411M/s ( 0.594M/s/cpu) Signed-off-by: Andrii Nakryiko Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20241024044159.3156646-3-andrii@kernel.org --- include/linux/uprobes.h | 54 +++++++- kernel/events/uprobes.c | 291 +++++++++++++++++++++++++++++++++++----- 2 files changed, 306 insertions(+), 39 deletions(-) diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index dbaf04189548..7a051b5d2edd 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -15,6 +15,7 @@ #include #include #include +#include struct uprobe; struct vm_area_struct; @@ -67,6 +68,53 @@ enum uprobe_task_state { UTASK_SSTEP_TRAPPED, }; +/* The state of hybrid-lifetime uprobe inside struct return_instance */ +enum hprobe_state { + HPROBE_LEASED, /* uretprobes_srcu-protected uprobe */ + HPROBE_STABLE, /* refcounted uprobe */ + HPROBE_GONE, /* NULL uprobe, SRCU expired, refcount failed */ + HPROBE_CONSUMED, /* uprobe "consumed" by uretprobe handler */ +}; + +/* + * Hybrid lifetime uprobe. Represents a uprobe instance that could be either + * SRCU protected (with SRCU protection eventually potentially timing out), + * refcounted using uprobe->ref, or there could be no valid uprobe (NULL). 
+ * + * hprobe's internal state is setup such that background timer thread can + * atomically "downgrade" temporarily RCU-protected uprobe into refcounted one + * (or no uprobe, if refcounting failed). + * + * *stable* pointer always point to the uprobe (or could be NULL if there is + * was no valid underlying uprobe to begin with). + * + * *leased* pointer is the key to achieving race-free atomic lifetime state + * transition and can have three possible states: + * - either the same non-NULL value as *stable*, in which case uprobe is + * SRCU-protected; + * - NULL, in which case uprobe (if there is any) is refcounted; + * - special __UPROBE_DEAD value, which represents an uprobe that was SRCU + * protected initially, but SRCU period timed out and we attempted to + * convert it to refcounted, but refcount_inc_not_zero() failed, because + * uprobe effectively went away (the last consumer unsubscribed). In this + * case it's important to know that *stable* pointer (which still has + * non-NULL uprobe pointer) shouldn't be used, because lifetime of + * underlying uprobe is not guaranteed anymore. __UPROBE_DEAD is just an + * internal marker and is handled transparently by hprobe_fetch() helper. + * + * When uprobe is SRCU-protected, we also record srcu_idx value, necessary for + * SRCU unlocking. + * + * See hprobe_expire() and hprobe_fetch() for details of race-free uprobe + * state transitioning details. It all hinges on atomic xchg() over *leaded* + * pointer. *stable* pointer, once initially set, is not modified concurrently. + */ +struct hprobe { + enum hprobe_state state; + int srcu_idx; + struct uprobe *uprobe; +}; + /* * uprobe_task: Metadata of a task while it singlesteps. */ @@ -86,6 +134,7 @@ struct uprobe_task { }; struct uprobe *active_uprobe; + struct timer_list ri_timer; unsigned long xol_vaddr; struct arch_uprobe *auprobe; @@ -100,7 +149,7 @@ struct return_consumer { }; struct return_instance { - struct uprobe *uprobe; + struct hprobe hprobe; unsigned long func; unsigned long stack; /* stack pointer */ unsigned long orig_ret_vaddr; /* original return address */ @@ -108,9 +157,10 @@ struct return_instance { int consumers_cnt; struct return_instance *next; /* keep as stack */ + struct rcu_head rcu; struct return_consumer consumers[] __counted_by(consumers_cnt); -}; +} ____cacheline_aligned; enum rp_check { RP_CHECK_CALL, diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index d7e489246608..a76ddc5fc982 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -28,6 +28,7 @@ #include #include #include +#include #include @@ -51,6 +52,9 @@ static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ]; DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem); +/* Covers return_instance's uprobe lifetime. 
*/ +DEFINE_STATIC_SRCU(uretprobes_srcu); + /* Have a copy of original instruction */ #define UPROBE_COPY_INSN 0 @@ -622,13 +626,20 @@ static inline bool uprobe_is_active(struct uprobe *uprobe) return !RB_EMPTY_NODE(&uprobe->rb_node); } -static void uprobe_free_rcu(struct rcu_head *rcu) +static void uprobe_free_rcu_tasks_trace(struct rcu_head *rcu) { struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu); kfree(uprobe); } +static void uprobe_free_srcu(struct rcu_head *rcu) +{ + struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu); + + call_rcu_tasks_trace(&uprobe->rcu, uprobe_free_rcu_tasks_trace); +} + static void uprobe_free_deferred(struct work_struct *work) { struct uprobe *uprobe = container_of(work, struct uprobe, work); @@ -652,7 +663,8 @@ static void uprobe_free_deferred(struct work_struct *work) delayed_uprobe_remove(uprobe, NULL); mutex_unlock(&delayed_uprobe_lock); - call_rcu_tasks_trace(&uprobe->rcu, uprobe_free_rcu); + /* start srcu -> rcu_tasks_trace -> kfree chain */ + call_srcu(&uretprobes_srcu, &uprobe->rcu, uprobe_free_srcu); } static void put_uprobe(struct uprobe *uprobe) @@ -664,6 +676,151 @@ static void put_uprobe(struct uprobe *uprobe) schedule_work(&uprobe->work); } +/* Initialize hprobe as SRCU-protected "leased" uprobe */ +static void hprobe_init_leased(struct hprobe *hprobe, struct uprobe *uprobe, int srcu_idx) +{ + WARN_ON(!uprobe); + hprobe->state = HPROBE_LEASED; + hprobe->uprobe = uprobe; + hprobe->srcu_idx = srcu_idx; +} + +/* Initialize hprobe as refcounted ("stable") uprobe (uprobe can be NULL). */ +static void hprobe_init_stable(struct hprobe *hprobe, struct uprobe *uprobe) +{ + hprobe->state = uprobe ? HPROBE_STABLE : HPROBE_GONE; + hprobe->uprobe = uprobe; + hprobe->srcu_idx = -1; +} + +/* + * hprobe_consume() fetches hprobe's underlying uprobe and detects whether + * uprobe is SRCU protected or is refcounted. hprobe_consume() can be + * used only once for a given hprobe. + * + * Caller has to call hprobe_finalize() and pass previous hprobe_state, so + * that hprobe_finalize() can perform SRCU unlock or put uprobe, whichever + * is appropriate. + */ +static inline struct uprobe *hprobe_consume(struct hprobe *hprobe, enum hprobe_state *hstate) +{ + *hstate = xchg(&hprobe->state, HPROBE_CONSUMED); + switch (*hstate) { + case HPROBE_LEASED: + case HPROBE_STABLE: + return hprobe->uprobe; + case HPROBE_GONE: /* uprobe is NULL, no SRCU */ + case HPROBE_CONSUMED: /* uprobe was finalized already, do nothing */ + return NULL; + default: + WARN(1, "hprobe invalid state %d", *hstate); + return NULL; + } +} + +/* + * Reset hprobe state and, if hprobe was LEASED, release SRCU lock. + * hprobe_finalize() can only be used from current context after + * hprobe_consume() call (which determines uprobe and hstate value). + */ +static void hprobe_finalize(struct hprobe *hprobe, enum hprobe_state hstate) +{ + switch (hstate) { + case HPROBE_LEASED: + __srcu_read_unlock(&uretprobes_srcu, hprobe->srcu_idx); + break; + case HPROBE_STABLE: + put_uprobe(hprobe->uprobe); + break; + case HPROBE_GONE: + case HPROBE_CONSUMED: + break; + default: + WARN(1, "hprobe invalid state %d", hstate); + break; + } +} + +/* + * Attempt to switch (atomically) uprobe from being SRCU protected (LEASED) + * to refcounted (STABLE) state. Competes with hprobe_consume(); only one of + * them can win the race to perform SRCU unlocking. Whoever wins must perform + * SRCU unlock. 
+ * + * Returns underlying valid uprobe or NULL, if there was no underlying uprobe + * to begin with or we failed to bump its refcount and it's going away. + * + * Returned non-NULL uprobe can be still safely used within an ongoing SRCU + * locked region. If `get` is true, it's guaranteed that non-NULL uprobe has + * an extra refcount for caller to assume and use. Otherwise, it's not + * guaranteed that returned uprobe has a positive refcount, so caller has to + * attempt try_get_uprobe(), if it needs to preserve uprobe beyond current + * SRCU lock region. See dup_utask(). + */ +static struct uprobe *hprobe_expire(struct hprobe *hprobe, bool get) +{ + enum hprobe_state hstate; + + /* + * return_instance's hprobe is protected by RCU. + * Underlying uprobe is itself protected from reuse by SRCU. + */ + lockdep_assert(rcu_read_lock_held() && srcu_read_lock_held(&uretprobes_srcu)); + + hstate = READ_ONCE(hprobe->state); + switch (hstate) { + case HPROBE_STABLE: + /* uprobe has positive refcount, bump refcount, if necessary */ + return get ? get_uprobe(hprobe->uprobe) : hprobe->uprobe; + case HPROBE_GONE: + /* + * SRCU was unlocked earlier and we didn't manage to take + * uprobe refcnt, so it's effectively NULL + */ + return NULL; + case HPROBE_CONSUMED: + /* + * uprobe was consumed, so it's effectively NULL as far as + * uretprobe processing logic is concerned + */ + return NULL; + case HPROBE_LEASED: { + struct uprobe *uprobe = try_get_uprobe(hprobe->uprobe); + /* + * Try to switch hprobe state, guarding against + * hprobe_consume() or another hprobe_expire() racing with us. + * Note, if we failed to get uprobe refcount, we use special + * HPROBE_GONE state to signal that hprobe->uprobe shouldn't + * be used as it will be freed after SRCU is unlocked. + */ + if (try_cmpxchg(&hprobe->state, &hstate, uprobe ? HPROBE_STABLE : HPROBE_GONE)) { + /* We won the race, we are the ones to unlock SRCU */ + __srcu_read_unlock(&uretprobes_srcu, hprobe->srcu_idx); + return get ? get_uprobe(uprobe) : uprobe; + } + + /* + * We lost the race, undo refcount bump (if it ever happened), + * unless caller would like an extra refcount anyways. + */ + if (uprobe && !get) + put_uprobe(uprobe); + /* + * Even if hprobe_consume() or another hprobe_expire() wins + * the state update race and unlocks SRCU from under us, we + * still have a guarantee that underyling uprobe won't be + * freed due to ongoing caller's SRCU lock region, so we can + * return it regardless. Also, if `get` was true, we also have + * an extra ref for the caller to own. This is used in dup_utask(). + */ + return uprobe; + } + default: + WARN(1, "unknown hprobe state %d", hstate); + return NULL; + } +} + static __always_inline int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset, const struct uprobe *r) @@ -1169,6 +1326,7 @@ void uprobe_unregister_sync(void) * handler_chain() or handle_uretprobe_chain() to do an use-after-free. 
*/ synchronize_rcu_tasks_trace(); + synchronize_srcu(&uretprobes_srcu); } EXPORT_SYMBOL_GPL(uprobe_unregister_sync); @@ -1731,11 +1889,18 @@ unsigned long uprobe_get_trap_addr(struct pt_regs *regs) return instruction_pointer(regs); } -static struct return_instance *free_ret_instance(struct return_instance *ri) +static struct return_instance *free_ret_instance(struct return_instance *ri, bool cleanup_hprobe) { struct return_instance *next = ri->next; - put_uprobe(ri->uprobe); - kfree(ri); + + if (cleanup_hprobe) { + enum hprobe_state hstate; + + (void)hprobe_consume(&ri->hprobe, &hstate); + hprobe_finalize(&ri->hprobe, hstate); + } + + kfree_rcu(ri, rcu); return next; } @@ -1753,14 +1918,48 @@ void uprobe_free_utask(struct task_struct *t) WARN_ON_ONCE(utask->active_uprobe || utask->xol_vaddr); + timer_delete_sync(&utask->ri_timer); + ri = utask->return_instances; while (ri) - ri = free_ret_instance(ri); + ri = free_ret_instance(ri, true /* cleanup_hprobe */); kfree(utask); t->utask = NULL; } +#define RI_TIMER_PERIOD (HZ / 10) /* 100 ms */ + +#define for_each_ret_instance_rcu(pos, head) \ + for (pos = rcu_dereference_raw(head); pos; pos = rcu_dereference_raw(pos->next)) + +static void ri_timer(struct timer_list *timer) +{ + struct uprobe_task *utask = container_of(timer, struct uprobe_task, ri_timer); + struct return_instance *ri; + + /* SRCU protects uprobe from reuse for the cmpxchg() inside hprobe_expire(). */ + guard(srcu)(&uretprobes_srcu); + /* RCU protects return_instance from freeing. */ + guard(rcu)(); + + for_each_ret_instance_rcu(ri, utask->return_instances) + hprobe_expire(&ri->hprobe, false); +} + +static struct uprobe_task *alloc_utask(void) +{ + struct uprobe_task *utask; + + utask = kzalloc(sizeof(*utask), GFP_KERNEL); + if (!utask) + return NULL; + + timer_setup(&utask->ri_timer, ri_timer, 0); + + return utask; +} + /* * Allocate a uprobe_task object for the task if necessary. * Called when the thread hits a breakpoint. @@ -1772,7 +1971,7 @@ void uprobe_free_utask(struct task_struct *t) static struct uprobe_task *get_utask(void) { if (!current->utask) - current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL); + current->utask = alloc_utask(); return current->utask; } @@ -1808,29 +2007,37 @@ static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask) { struct uprobe_task *n_utask; struct return_instance **p, *o, *n; + struct uprobe *uprobe; - n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL); + n_utask = alloc_utask(); if (!n_utask) return -ENOMEM; t->utask = n_utask; + /* protect uprobes from freeing, we'll need try_get_uprobe() them */ + guard(srcu)(&uretprobes_srcu); + p = &n_utask->return_instances; for (o = o_utask->return_instances; o; o = o->next) { n = dup_return_instance(o); if (!n) return -ENOMEM; - /* - * uprobe's refcnt has to be positive at this point, kept by - * utask->return_instances items; return_instances can't be - * removed right now, as task is blocked due to duping; so - * get_uprobe() is safe to use here. - */ - get_uprobe(n->uprobe); - n->next = NULL; + /* if uprobe is non-NULL, we'll have an extra refcount for uprobe */ + uprobe = hprobe_expire(&o->hprobe, true); - *p = n; + /* + * New utask will have stable properly refcounted uprobe or + * NULL. Even if we failed to get refcounted uprobe, we still + * need to preserve full set of return_instances for proper + * uretprobe handling and nesting in forked task. 
+ */ + hprobe_init_stable(&n->hprobe, uprobe); + + n->next = NULL; + rcu_assign_pointer(*p, n); p = &n->next; + n_utask->depth++; } @@ -1906,10 +2113,10 @@ static void cleanup_return_instances(struct uprobe_task *utask, bool chained, enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL; while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) { - ri = free_ret_instance(ri); + ri = free_ret_instance(ri, true /* cleanup_hprobe */); utask->depth--; } - utask->return_instances = ri; + rcu_assign_pointer(utask->return_instances, ri); } static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs, @@ -1918,6 +2125,7 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs, struct uprobe_task *utask = current->utask; unsigned long orig_ret_vaddr, trampoline_vaddr; bool chained; + int srcu_idx; if (!get_xol_area()) goto free; @@ -1929,14 +2137,10 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs, goto free; } - /* we need to bump refcount to store uprobe in utask */ - if (!try_get_uprobe(uprobe)) - goto free; - trampoline_vaddr = uprobe_get_trampoline_vaddr(); orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs); if (orig_ret_vaddr == -1) - goto put; + goto free; /* drop the entries invalidated by longjmp() */ chained = (orig_ret_vaddr == trampoline_vaddr); @@ -1954,23 +2158,28 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs, * attack from user-space. */ uprobe_warn(current, "handle tail call"); - goto put; + goto free; } orig_ret_vaddr = utask->return_instances->orig_ret_vaddr; } - ri->uprobe = uprobe; + + /* __srcu_read_lock() because SRCU lock survives switch to user space */ + srcu_idx = __srcu_read_lock(&uretprobes_srcu); + ri->func = instruction_pointer(regs); ri->stack = user_stack_pointer(regs); ri->orig_ret_vaddr = orig_ret_vaddr; ri->chained = chained; utask->depth++; + + hprobe_init_leased(&ri->hprobe, uprobe, srcu_idx); ri->next = utask->return_instances; - utask->return_instances = ri; + rcu_assign_pointer(utask->return_instances, ri); + + mod_timer(&utask->ri_timer, jiffies + RI_TIMER_PERIOD); return; -put: - put_uprobe(uprobe); free: kfree(ri); } @@ -2215,13 +2424,16 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) } static void -handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs) +handle_uretprobe_chain(struct return_instance *ri, struct uprobe *uprobe, struct pt_regs *regs) { - struct uprobe *uprobe = ri->uprobe; struct return_consumer *ric; struct uprobe_consumer *uc; int ric_idx = 0; + /* all consumers unsubscribed meanwhile */ + if (unlikely(!uprobe)) + return; + rcu_read_lock_trace(); list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) { bool session = uc->handler && uc->ret_handler; @@ -2251,6 +2463,8 @@ void uprobe_handle_trampoline(struct pt_regs *regs) { struct uprobe_task *utask; struct return_instance *ri, *next; + struct uprobe *uprobe; + enum hprobe_state hstate; bool valid; utask = current->utask; @@ -2281,21 +2495,24 @@ void uprobe_handle_trampoline(struct pt_regs *regs) * trampoline addresses on the stack are replaced with correct * original return addresses */ - utask->return_instances = ri->next; + rcu_assign_pointer(utask->return_instances, ri->next); + + uprobe = hprobe_consume(&ri->hprobe, &hstate); if (valid) - handle_uretprobe_chain(ri, regs); - ri = free_ret_instance(ri); + handle_uretprobe_chain(ri, uprobe, regs); + hprobe_finalize(&ri->hprobe, hstate); + + 
/* We already took care of hprobe, no need to waste more time on that. */ + ri = free_ret_instance(ri, false /* !cleanup_hprobe */); utask->depth--; } while (ri != next); } while (!valid); - utask->return_instances = ri; return; - sigill: +sigill: uprobe_warn(current, "handle uretprobe, sending SIGILL."); force_sig(SIGILL); - } bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs) From 5b590160d2cf776b304eb054afafea2bd55e3620 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 22 Oct 2024 18:59:07 +0300 Subject: [PATCH 22/32] perf/x86/intel/pt: Fix buffer full but size is 0 case If the trace data buffer becomes full, a truncated flag [T] is reported in PERF_RECORD_AUX. In some cases, the size reported is 0, even though data must have been added to make the buffer full. That happens when the buffer fills up from empty to full before the Intel PT driver has updated the buffer position. Then the driver calculates the new buffer position before calculating the data size. If the old and new positions are the same, the data size is reported as 0, even though it is really the whole buffer size. Fix by detecting when the buffer position is wrapped, and adjust the data size calculation accordingly. Example Use a very small buffer size (8K) and observe the size of truncated [T] data. Before the fix, it is possible to see records of 0 size. Before: $ perf record -m,8K -e intel_pt// uname Linux [ perf record: Woken up 2 times to write data ] [ perf record: Captured and wrote 0.105 MB perf.data ] $ perf script -D --no-itrace | grep AUX | grep -F '[T]' Warning: AUX data lost 2 times out of 3! 5 19462712368111 0x19710 [0x40]: PERF_RECORD_AUX offset: 0 size: 0 flags: 0x1 [T] 5 19462712700046 0x19ba8 [0x40]: PERF_RECORD_AUX offset: 0x170 size: 0xe90 flags: 0x1 [T] After: $ perf record -m,8K -e intel_pt// uname Linux [ perf record: Woken up 3 times to write data ] [ perf record: Captured and wrote 0.040 MB perf.data ] $ perf script -D --no-itrace | grep AUX | grep -F '[T]' Warning: AUX data lost 2 times out of 3! 
1 113720802995 0x4948 [0x40]: PERF_RECORD_AUX offset: 0 size: 0x2000 flags: 0x1 [T] 1 113720979812 0x6b10 [0x40]: PERF_RECORD_AUX offset: 0x2000 size: 0x2000 flags: 0x1 [T] Fixes: 52ca9ced3f70 ("perf/x86/intel/pt: Add Intel PT PMU driver") Signed-off-by: Adrian Hunter Signed-off-by: Peter Zijlstra (Intel) Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20241022155920.17511-2-adrian.hunter@intel.com --- arch/x86/events/intel/pt.c | 11 ++++++++--- arch/x86/events/intel/pt.h | 2 ++ 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index fd4670a6694e..a087bc0c5498 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -828,11 +828,13 @@ static void pt_buffer_advance(struct pt_buffer *buf) buf->cur_idx++; if (buf->cur_idx == buf->cur->last) { - if (buf->cur == buf->last) + if (buf->cur == buf->last) { buf->cur = buf->first; - else + buf->wrapped = true; + } else { buf->cur = list_entry(buf->cur->list.next, struct topa, list); + } buf->cur_idx = 0; } } @@ -846,8 +848,11 @@ static void pt_buffer_advance(struct pt_buffer *buf) static void pt_update_head(struct pt *pt) { struct pt_buffer *buf = perf_get_aux(&pt->handle); + bool wrapped = buf->wrapped; u64 topa_idx, base, old; + buf->wrapped = false; + if (buf->single) { local_set(&buf->data_size, buf->output_off); return; @@ -865,7 +870,7 @@ static void pt_update_head(struct pt *pt) } else { old = (local64_xchg(&buf->head, base) & ((buf->nr_pages << PAGE_SHIFT) - 1)); - if (base < old) + if (base < old || (base == old && wrapped)) base += buf->nr_pages << PAGE_SHIFT; local_add(base - old, &buf->data_size); diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h index f5e46c04c145..a1b6c04b7f68 100644 --- a/arch/x86/events/intel/pt.h +++ b/arch/x86/events/intel/pt.h @@ -65,6 +65,7 @@ struct pt_pmu { * @head: logical write offset inside the buffer * @snapshot: if this is for a snapshot/overwrite counter * @single: use Single Range Output instead of ToPA + * @wrapped: buffer advance wrapped back to the first topa table * @stop_pos: STOP topa entry index * @intr_pos: INT topa entry index * @stop_te: STOP topa entry pointer @@ -82,6 +83,7 @@ struct pt_buffer { local64_t head; bool snapshot; bool single; + bool wrapped; long stop_pos, intr_pos; struct topa_entry *stop_te, *intr_te; void **data_pages; From 18d92bb57c39504d9da11c6ef604f58eb1d5a117 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 22 Oct 2024 18:59:08 +0300 Subject: [PATCH 23/32] perf/core: Add aux_pause, aux_resume, aux_start_paused Hardware traces, such as instruction traces, can produce a vast amount of trace data, so being able to reduce tracing to more specific circumstances can be useful. The ability to pause or resume tracing when another event happens, can do that. Add ability for an event to "pause" or "resume" AUX area tracing. Add aux_pause bit to perf_event_attr to indicate that, if the event happens, the associated AUX area tracing should be paused. Ditto aux_resume. Do not allow aux_pause and aux_resume to be set together. Add aux_start_paused bit to perf_event_attr to indicate to an AUX area event that it should start in a "paused" state. Add aux_paused to struct hw_perf_event for AUX area events to keep track of the "paused" state. aux_paused is initialized to aux_start_paused. Add PERF_EF_PAUSE and PERF_EF_RESUME modes for ->stop() and ->start() callbacks. Call as needed, during __perf_event_output(). 
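From the user side, the interface is just the new attr bits. A hedged sketch, assuming uapi headers that carry this patch; the intel_pt PMU type is normally read from /sys/bus/event_source/devices/intel_pt/type and the tracepoint id from tracefs, both hardcoded here purely for illustration:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

static int perf_open(struct perf_event_attr *attr, int group_fd)
{
	return syscall(SYS_perf_event_open, attr, 0 /* pid */,
		       -1 /* cpu */, group_fd, 0 /* flags */);
}

int main(void)
{
	struct perf_event_attr aux = {0}, trig = {0};
	int leader, trigger;

	aux.size = sizeof(aux);
	aux.type = 8;			/* assumed intel_pt type id */
	aux.aux_start_paused = 1;	/* trace starts paused */
	leader = perf_open(&aux, -1);

	trig.size = sizeof(trig);
	trig.type = PERF_TYPE_TRACEPOINT;
	trig.config = 0;		/* assumed tracepoint id from tracefs */
	trig.sample_period = 1;
	trig.aux_resume = 1;		/* overflow resumes the leader's trace */
	trigger = perf_open(&trig, leader);	/* grouped under the AUX leader */

	return (leader < 0 || trigger < 0);
}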
Add aux_in_pause_resume to struct perf_buffer to prevent races with the NMI handler. Pause/resume in NMI context will miss out if it coincides with another pause/resume. To use aux_pause or aux_resume, an event must be in a group with the AUX area event as the group leader. Example (requires Intel PT and tools patches also): $ perf record --kcore -e intel_pt/aux-action=start-paused/k,syscalls:sys_enter_newuname/aux-action=resume/,syscalls:sys_exit_newuname/aux-action=pause/ uname Linux [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.043 MB perf.data ] $ perf script --call-trace uname 30805 [000] 24001.058782799: name: 0x7ffc9c1865b0 uname 30805 [000] 24001.058784424: psb offs: 0 uname 30805 [000] 24001.058784424: cbr: 39 freq: 3904 MHz (139%) uname 30805 [000] 24001.058784629: ([kernel.kallsyms]) debug_smp_processor_id uname 30805 [000] 24001.058784629: ([kernel.kallsyms]) __x64_sys_newuname uname 30805 [000] 24001.058784629: ([kernel.kallsyms]) down_read uname 30805 [000] 24001.058784629: ([kernel.kallsyms]) __cond_resched uname 30805 [000] 24001.058784629: ([kernel.kallsyms]) preempt_count_add uname 30805 [000] 24001.058784629: ([kernel.kallsyms]) in_lock_functions uname 30805 [000] 24001.058784629: ([kernel.kallsyms]) preempt_count_sub uname 30805 [000] 24001.058784629: ([kernel.kallsyms]) up_read uname 30805 [000] 24001.058784629: ([kernel.kallsyms]) preempt_count_add uname 30805 [000] 24001.058784838: ([kernel.kallsyms]) in_lock_functions uname 30805 [000] 24001.058784838: ([kernel.kallsyms]) preempt_count_sub uname 30805 [000] 24001.058784838: ([kernel.kallsyms]) _copy_to_user uname 30805 [000] 24001.058784838: ([kernel.kallsyms]) syscall_exit_to_user_mode uname 30805 [000] 24001.058784838: ([kernel.kallsyms]) syscall_exit_work uname 30805 [000] 24001.058784838: ([kernel.kallsyms]) perf_syscall_exit uname 30805 [000] 24001.058784838: ([kernel.kallsyms]) debug_smp_processor_id uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) perf_trace_buf_alloc uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) perf_swevent_get_recursion_context uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) debug_smp_processor_id uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) debug_smp_processor_id uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) perf_tp_event uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) perf_trace_buf_update uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) tracing_gen_ctx_irq_test uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) perf_swevent_event uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) __perf_event_account_interrupt uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) __this_cpu_preempt_check uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) perf_event_output_forward uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) perf_event_aux_pause uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) ring_buffer_get uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) __rcu_read_lock uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) __rcu_read_unlock uname 30805 [000] 24001.058785254: ([kernel.kallsyms]) pt_event_stop uname 30805 [000] 24001.058785254: ([kernel.kallsyms]) debug_smp_processor_id uname 30805 [000] 24001.058785254: ([kernel.kallsyms]) debug_smp_processor_id uname 30805 [000] 24001.058785254: ([kernel.kallsyms]) native_write_msr uname 30805 [000] 24001.058785463: ([kernel.kallsyms]) native_write_msr uname 30805 [000] 24001.058785639: 0x0 Signed-off-by: Adrian Hunter 
Signed-off-by: Peter Zijlstra (Intel) Acked-by: James Clark Link: https://lkml.kernel.org/r/20241022155920.17511-3-adrian.hunter@intel.com --- include/linux/perf_event.h | 28 ++++++++++++ include/uapi/linux/perf_event.h | 11 ++++- kernel/events/core.c | 75 +++++++++++++++++++++++++++++++-- kernel/events/internal.h | 1 + 4 files changed, 110 insertions(+), 5 deletions(-) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index fb908843f209..91b310052a7c 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -170,6 +170,12 @@ struct hw_perf_event { }; struct { /* aux / Intel-PT */ u64 aux_config; + /* + * For AUX area events, aux_paused cannot be a state + * flag because it can be updated asynchronously to + * state. + */ + unsigned int aux_paused; }; struct { /* software */ struct hrtimer hrtimer; @@ -294,6 +300,7 @@ struct perf_event_pmu_context; #define PERF_PMU_CAP_NO_EXCLUDE 0x0040 #define PERF_PMU_CAP_AUX_OUTPUT 0x0080 #define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100 +#define PERF_PMU_CAP_AUX_PAUSE 0x0200 /** * pmu::scope @@ -384,6 +391,8 @@ struct pmu { #define PERF_EF_START 0x01 /* start the counter when adding */ #define PERF_EF_RELOAD 0x02 /* reload the counter when starting */ #define PERF_EF_UPDATE 0x04 /* update the counter when stopping */ +#define PERF_EF_PAUSE 0x08 /* AUX area event, pause tracing */ +#define PERF_EF_RESUME 0x10 /* AUX area event, resume tracing */ /* * Adds/Removes a counter to/from the PMU, can be done inside a @@ -423,6 +432,18 @@ struct pmu { * * ->start() with PERF_EF_RELOAD will reprogram the counter * value, must be preceded by a ->stop() with PERF_EF_UPDATE. + * + * ->stop() with PERF_EF_PAUSE will stop as simply as possible. Will not + * overlap another ->stop() with PERF_EF_PAUSE nor ->start() with + * PERF_EF_RESUME. + * + * ->start() with PERF_EF_RESUME will start as simply as possible but + * only if the counter is not otherwise stopped. Will not overlap + * another ->start() with PERF_EF_RESUME nor ->stop() with + * PERF_EF_PAUSE. + * + * Notably, PERF_EF_PAUSE/PERF_EF_RESUME *can* be concurrent with other + * ->stop()/->start() invocations, just not itself. 
*/ void (*start) (struct perf_event *event, int flags); void (*stop) (struct perf_event *event, int flags); @@ -1679,6 +1700,13 @@ static inline bool has_aux(struct perf_event *event) return event->pmu->setup_aux; } +static inline bool has_aux_action(struct perf_event *event) +{ + return event->attr.aux_sample_size || + event->attr.aux_pause || + event->attr.aux_resume; +} + static inline bool is_write_backward(struct perf_event *event) { return !!event->attr.write_backward; diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 4842c36fdf80..0524d541d4e3 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -511,7 +511,16 @@ struct perf_event_attr { __u16 sample_max_stack; __u16 __reserved_2; __u32 aux_sample_size; - __u32 __reserved_3; + + union { + __u32 aux_action; + struct { + __u32 aux_start_paused : 1, /* start AUX area tracing paused */ + aux_pause : 1, /* on overflow, pause AUX area tracing */ + aux_resume : 1, /* on overflow, resume AUX area tracing */ + __reserved_3 : 29; + }; + }; /* * User provided data if sigtrap=1, passed back to user via diff --git a/kernel/events/core.c b/kernel/events/core.c index e3589c4287cb..0e9cfe6f3535 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -2146,7 +2146,7 @@ static void perf_put_aux_event(struct perf_event *event) static bool perf_need_aux_event(struct perf_event *event) { - return !!event->attr.aux_output || !!event->attr.aux_sample_size; + return event->attr.aux_output || has_aux_action(event); } static int perf_get_aux_event(struct perf_event *event, @@ -2171,6 +2171,10 @@ static int perf_get_aux_event(struct perf_event *event, !perf_aux_output_match(event, group_leader)) return 0; + if ((event->attr.aux_pause || event->attr.aux_resume) && + !(group_leader->pmu->capabilities & PERF_PMU_CAP_AUX_PAUSE)) + return 0; + if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux) return 0; @@ -8016,6 +8020,49 @@ void perf_prepare_header(struct perf_event_header *header, WARN_ON_ONCE(header->size & 7); } +static void __perf_event_aux_pause(struct perf_event *event, bool pause) +{ + if (pause) { + if (!event->hw.aux_paused) { + event->hw.aux_paused = 1; + event->pmu->stop(event, PERF_EF_PAUSE); + } + } else { + if (event->hw.aux_paused) { + event->hw.aux_paused = 0; + event->pmu->start(event, PERF_EF_RESUME); + } + } +} + +static void perf_event_aux_pause(struct perf_event *event, bool pause) +{ + struct perf_buffer *rb; + + if (WARN_ON_ONCE(!event)) + return; + + rb = ring_buffer_get(event); + if (!rb) + return; + + scoped_guard (irqsave) { + /* + * Guard against self-recursion here. Another event could trip + * this same from NMI context. 
+ */ + if (READ_ONCE(rb->aux_in_pause_resume)) + break; + + WRITE_ONCE(rb->aux_in_pause_resume, 1); + barrier(); + __perf_event_aux_pause(event, pause); + barrier(); + WRITE_ONCE(rb->aux_in_pause_resume, 0); + } + ring_buffer_put(rb); +} + static __always_inline int __perf_event_output(struct perf_event *event, struct perf_sample_data *data, @@ -9818,9 +9865,12 @@ static int __perf_event_overflow(struct perf_event *event, ret = __perf_event_account_interrupt(event, throttle); + if (event->attr.aux_pause) + perf_event_aux_pause(event->aux_event, true); + if (event->prog && event->prog->type == BPF_PROG_TYPE_PERF_EVENT && !bpf_overflow_handler(event, data, regs)) - return ret; + goto out; /* * XXX event_limit might not quite work as expected on inherited @@ -9882,6 +9932,9 @@ static int __perf_event_overflow(struct perf_event *event, event->pending_wakeup = 1; irq_work_queue(&event->pending_irq); } +out: + if (event->attr.aux_resume) + perf_event_aux_pause(event->aux_event, false); return ret; } @@ -12273,11 +12326,25 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, } if (event->attr.aux_output && - !(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT)) { + (!(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT) || + event->attr.aux_pause || event->attr.aux_resume)) { err = -EOPNOTSUPP; goto err_pmu; } + if (event->attr.aux_pause && event->attr.aux_resume) { + err = -EINVAL; + goto err_pmu; + } + + if (event->attr.aux_start_paused) { + if (!(pmu->capabilities & PERF_PMU_CAP_AUX_PAUSE)) { + err = -EOPNOTSUPP; + goto err_pmu; + } + event->hw.aux_paused = 1; + } + if (cgroup_fd != -1) { err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); if (err) @@ -13073,7 +13140,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, * Grouping is not supported for kernel events, neither is 'AUX', * make sure the caller's intentions are adjusted. */ - if (attr->aux_output) + if (attr->aux_output || attr->aux_action) return ERR_PTR(-EINVAL); event = perf_event_alloc(attr, cpu, task, NULL, NULL, diff --git a/kernel/events/internal.h b/kernel/events/internal.h index e072d995d670..249288d82b8d 100644 --- a/kernel/events/internal.h +++ b/kernel/events/internal.h @@ -52,6 +52,7 @@ struct perf_buffer { void (*free_aux)(void *); refcount_t aux_refcount; int aux_in_sampling; + int aux_in_pause_resume; void **aux_pages; void *aux_priv; From 08c7454ceb948d773fcd0ff7b6fb9c315e2f801a Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 22 Oct 2024 18:59:09 +0300 Subject: [PATCH 24/32] perf/x86/intel/pt: Add support for pause / resume Prevent tracing from starting if aux_paused. Implement support for PERF_EF_PAUSE / PERF_EF_RESUME. When aux_paused, stop tracing. When not aux_paused, only start tracing if it isn't currently meant to be stopped.
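In terms of the pmu callback contract documented in perf_event.h above, a driver that advertises PERF_PMU_CAP_AUX_PAUSE ends up with roughly the following shape. This is a hypothetical foo_* driver, not the PT code (which additionally has to fence against its NMI handler, as the diff below shows); the hardware-poking helpers are stubs.

#include <linux/perf_event.h>
#include <linux/percpu.h>

struct foo_pmu {
	int pause_allowed;
	int resume_allowed;
};

static DEFINE_PER_CPU(struct foo_pmu, foo_ctx);

static void foo_hw_trace_on(struct perf_event *event) { /* hw enable */ }
static void foo_hw_trace_off(struct perf_event *event) { /* hw disable */ }
static void foo_setup_buffers(struct perf_event *event) { /* aux setup */ }

static void foo_event_start(struct perf_event *event, int mode)
{
	struct foo_pmu *foo = this_cpu_ptr(&foo_ctx);

	if (mode & PERF_EF_RESUME) {
		/* lightweight path: restart hw only if not fully stopped */
		if (READ_ONCE(foo->resume_allowed))
			foo_hw_trace_on(event);
		return;
	}

	/* full ->start(): set up buffers, then open the pause window */
	foo_setup_buffers(event);
	WRITE_ONCE(foo->resume_allowed, 1);
	foo_hw_trace_on(event);
	WRITE_ONCE(foo->pause_allowed, 1);
}

static void foo_event_stop(struct perf_event *event, int mode)
{
	struct foo_pmu *foo = this_cpu_ptr(&foo_ctx);

	if (mode & PERF_EF_PAUSE) {
		/* lightweight path: just stop the hardware */
		if (READ_ONCE(foo->pause_allowed))
			foo_hw_trace_off(event);
		return;
	}

	/* full ->stop(): first block pause/resume, then stop hw */
	WRITE_ONCE(foo->pause_allowed, 0);
	WRITE_ONCE(foo->resume_allowed, 0);
	foo_hw_trace_off(event);
}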
Signed-off-by: Adrian Hunter Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Andi Kleen Link: https://lkml.kernel.org/r/20241022155920.17511-4-adrian.hunter@intel.com --- arch/x86/events/intel/pt.c | 73 ++++++++++++++++++++++++++++++++++++-- arch/x86/events/intel/pt.h | 4 +++ 2 files changed, 74 insertions(+), 3 deletions(-) diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index a087bc0c5498..4b0373bc8ab4 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -418,6 +418,9 @@ static void pt_config_start(struct perf_event *event) struct pt *pt = this_cpu_ptr(&pt_ctx); u64 ctl = event->hw.aux_config; + if (READ_ONCE(event->hw.aux_paused)) + return; + ctl |= RTIT_CTL_TRACEEN; if (READ_ONCE(pt->vmx_on)) perf_aux_output_flag(&pt->handle, PERF_AUX_FLAG_PARTIAL); @@ -534,7 +537,24 @@ static void pt_config(struct perf_event *event) reg |= (event->attr.config & PT_CONFIG_MASK); event->hw.aux_config = reg; + + /* + * Allow resume before starting so as not to overwrite a value set by a + * PMI. + */ + barrier(); + WRITE_ONCE(pt->resume_allowed, 1); + /* Configuration is complete, it is now OK to handle an NMI */ + barrier(); + WRITE_ONCE(pt->handle_nmi, 1); + barrier(); pt_config_start(event); + barrier(); + /* + * Allow pause after starting so its pt_config_stop() doesn't race with + * pt_config_start(). + */ + WRITE_ONCE(pt->pause_allowed, 1); } static void pt_config_stop(struct perf_event *event) @@ -1516,6 +1536,7 @@ void intel_pt_interrupt(void) buf = perf_aux_output_begin(&pt->handle, event); if (!buf) { event->hw.state = PERF_HES_STOPPED; + WRITE_ONCE(pt->resume_allowed, 0); return; } @@ -1524,6 +1545,7 @@ void intel_pt_interrupt(void) ret = pt_buffer_reset_markers(buf, &pt->handle); if (ret) { perf_aux_output_end(&pt->handle, 0); + WRITE_ONCE(pt->resume_allowed, 0); return; } @@ -1578,6 +1600,26 @@ static void pt_event_start(struct perf_event *event, int mode) struct pt *pt = this_cpu_ptr(&pt_ctx); struct pt_buffer *buf; + if (mode & PERF_EF_RESUME) { + if (READ_ONCE(pt->resume_allowed)) { + u64 status; + + /* + * Only if the trace is not active and the error and + * stopped bits are clear, is it safe to start, but a + * PMI might have just cleared these, so resume_allowed + * must be checked again also. + */ + rdmsrl(MSR_IA32_RTIT_STATUS, status); + if (!(status & (RTIT_STATUS_TRIGGEREN | + RTIT_STATUS_ERROR | + RTIT_STATUS_STOPPED)) && + READ_ONCE(pt->resume_allowed)) + pt_config_start(event); + } + return; + } + buf = perf_aux_output_begin(&pt->handle, event); if (!buf) goto fail_stop; @@ -1588,7 +1630,6 @@ static void pt_event_start(struct perf_event *event, int mode) goto fail_end_stop; } - WRITE_ONCE(pt->handle_nmi, 1); hwc->state = 0; pt_config_buffer(buf); @@ -1606,6 +1647,12 @@ static void pt_event_stop(struct perf_event *event, int mode) { struct pt *pt = this_cpu_ptr(&pt_ctx); + if (mode & PERF_EF_PAUSE) { + if (READ_ONCE(pt->pause_allowed)) + pt_config_stop(event); + return; + } + /* * Protect against the PMI racing with disabling wrmsr, * see comment in intel_pt_interrupt(). @@ -1613,6 +1660,15 @@ static void pt_event_stop(struct perf_event *event, int mode) WRITE_ONCE(pt->handle_nmi, 0); barrier(); + /* + * Prevent a resume from attempting to restart tracing, or a pause + * during a subsequent start. Do this after clearing handle_nmi so that + * pt_event_snapshot_aux() will not re-allow them. 
+ */ + WRITE_ONCE(pt->pause_allowed, 0); + WRITE_ONCE(pt->resume_allowed, 0); + barrier(); + pt_config_stop(event); if (event->hw.state == PERF_HES_STOPPED) @@ -1662,6 +1718,10 @@ static long pt_event_snapshot_aux(struct perf_event *event, if (WARN_ON_ONCE(!buf->snapshot)) return 0; + /* Prevent pause/resume from attempting to start/stop tracing */ + WRITE_ONCE(pt->pause_allowed, 0); + WRITE_ONCE(pt->resume_allowed, 0); + barrier(); /* * There is no PT interrupt in this mode, so stop the trace and it will * remain stopped while the buffer is copied. @@ -1681,8 +1741,13 @@ static long pt_event_snapshot_aux(struct perf_event *event, * Here, handle_nmi tells us if the tracing was on. * If the tracing was on, restart it. */ - if (READ_ONCE(pt->handle_nmi)) + if (READ_ONCE(pt->handle_nmi)) { + WRITE_ONCE(pt->resume_allowed, 1); + barrier(); pt_config_start(event); + barrier(); + WRITE_ONCE(pt->pause_allowed, 1); + } return ret; } @@ -1798,7 +1863,9 @@ static __init int pt_init(void) if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) pt_pmu.pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG; - pt_pmu.pmu.capabilities |= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE; + pt_pmu.pmu.capabilities |= PERF_PMU_CAP_EXCLUSIVE | + PERF_PMU_CAP_ITRACE | + PERF_PMU_CAP_AUX_PAUSE; pt_pmu.pmu.attr_groups = pt_attr_groups; pt_pmu.pmu.task_ctx_nr = perf_sw_context; pt_pmu.pmu.event_init = pt_event_init; diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h index a1b6c04b7f68..7ee94fc6d7cb 100644 --- a/arch/x86/events/intel/pt.h +++ b/arch/x86/events/intel/pt.h @@ -119,6 +119,8 @@ struct pt_filters { * @filters: last configured filters * @handle_nmi: do handle PT PMI on this cpu, there's an active event * @vmx_on: 1 if VMX is ON on this cpu + * @pause_allowed: PERF_EF_PAUSE is allowed to stop tracing + * @resume_allowed: PERF_EF_RESUME is allowed to start tracing * @output_base: cached RTIT_OUTPUT_BASE MSR value * @output_mask: cached RTIT_OUTPUT_MASK MSR value */ @@ -127,6 +129,8 @@ struct pt { struct pt_filters filters; int handle_nmi; int vmx_on; + int pause_allowed; + int resume_allowed; u64 output_base; u64 output_mask; }; From 0d5eb14c1e2ed4a8413458cb3b779f215ff214aa Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 22 Oct 2024 18:59:10 +0300 Subject: [PATCH 25/32] perf/x86/intel: Do not enable large PEBS for events with aux actions or aux sampling Events with aux actions or aux sampling expect the PMI to coincide with the event, which does not happen for large PEBS, so do not enable large PEBS in that case. 
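The key is the !has_aux_action() test in the intel_pmu_hw_config() hunk below. The helper itself is introduced by an earlier patch in this series; its shape is presumably along these lines (an assumption, reproduced only for context):

static inline bool has_aux_action(struct perf_event *event)
{
	/*
	 * An event has an "aux action" when each overflow must raise a
	 * PMI at the triggering event: AUX area sampling, or pausing or
	 * resuming an AUX event. Large PEBS batches records and raises
	 * one late PMI, which breaks that expectation.
	 */
	return event->attr.aux_sample_size ||
	       event->attr.aux_pause ||
	       event->attr.aux_resume;
}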
Signed-off-by: Adrian Hunter Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Andi Kleen Link: https://lkml.kernel.org/r/20241022155920.17511-5-adrian.hunter@intel.com --- arch/x86/events/intel/core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 7ca40002a19b..bb284aff7bfd 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3962,8 +3962,8 @@ static int intel_pmu_hw_config(struct perf_event *event) if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) { event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; - if (!(event->attr.sample_type & - ~intel_pmu_large_pebs_flags(event))) { + if (!(event->attr.sample_type & ~intel_pmu_large_pebs_flags(event)) && + !has_aux_action(event)) { event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS; event->attach_state |= PERF_ATTACH_SCHED_CB; } From 2e71e8bc6f02275a67bcb882779ab6d21abc68c4 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Tue, 5 Nov 2024 09:52:53 +0100 Subject: [PATCH 26/32] perf/x86/amd/uncore: Avoid a false positive warning about snprintf truncation in amd_uncore_umc_ctx_init MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the following warning: CC [M] arch/x86/events/amd/uncore.o arch/x86/events/amd/uncore.c: In function ‘amd_uncore_umc_ctx_init’: arch/x86/events/amd/uncore.c:951:52: warning: ‘%d’ directive output may be truncated writing between 1 and 10 bytes into a region of size 8 [-Wformat-truncation=] snprintf(pmu->name, sizeof(pmu->name), "amd_umc_%d", index); ^~ arch/x86/events/amd/uncore.c:951:43: note: directive argument in the range [0, 2147483647] snprintf(pmu->name, sizeof(pmu->name), "amd_umc_%d", index); ^~~~~~~~~~~~ arch/x86/events/amd/uncore.c:951:4: note: ‘snprintf’ output between 10 and 19 bytes into a destination of size 16 snprintf(pmu->name, sizeof(pmu->name), "amd_umc_%d", index); ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As far as I can see, there can't be more than UNCORE_GROUP_MAX (256) groups and each group can't have more than 255 PMUs, so the number printed by this %d can't exceed 65279; that's only 5 digits and would fit into the buffer. So it's a false positive warning. But we can make the compiler happy by declaring index as a 16-bit number.
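The arithmetic behind the warning, as a standalone illustration in plain C (not kernel code): the destination is 16 bytes and the "amd_umc_" prefix takes 8, while an int may need up to 10 digits but a 16-bit value needs at most 5.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	char name[16];		/* same size as pmu->name */
	int idx = 0;		/* up to "2147483647": 10 digits */
	uint16_t idx16 = 0;	/* up to "65535": 5 digits */

	/* 8 + up to 10 digits + NUL = up to 19 bytes: gcc may warn */
	snprintf(name, sizeof(name), "amd_umc_%d", idx);
	/* 8 + at most 5 digits + NUL = at most 14 bytes: always fits */
	snprintf(name, sizeof(name), "amd_umc_%hu", idx16);
	return 0;
}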
Signed-off-by: Jean Delvare Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Sandipan Das Link: https://lore.kernel.org/r/20241105095253.18f34b4d@endymion.delvare --- arch/x86/events/amd/uncore.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index 0bfde2ea5cb8..49c26ce2b115 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -916,7 +916,8 @@ int amd_uncore_umc_ctx_init(struct amd_uncore *uncore, unsigned int cpu) u8 group_num_pmcs[UNCORE_GROUP_MAX] = { 0 }; union amd_uncore_info info; struct amd_uncore_pmu *pmu; - int index = 0, gid, i; + int gid, i; + u16 index = 0; if (pmu_version < 2) return 0; @@ -948,7 +949,7 @@ int amd_uncore_umc_ctx_init(struct amd_uncore *uncore, unsigned int cpu) for_each_set_bit(gid, gmask, UNCORE_GROUP_MAX) { for (i = 0; i < group_num_pmus[gid]; i++) { pmu = &uncore->pmus[index]; - snprintf(pmu->name, sizeof(pmu->name), "amd_umc_%d", index); + snprintf(pmu->name, sizeof(pmu->name), "amd_umc_%hu", index); pmu->num_counters = group_num_pmcs[gid] / group_num_pmus[gid]; pmu->msr_base = MSR_F19H_UMC_PERF_CTL + i * pmu->num_counters * 2; pmu->rdpmc_base = -1; From c554aa9ca976480839af342204e05bb4ce8367d5 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Fri, 1 Nov 2024 22:13:33 +0100 Subject: [PATCH 27/32] uprobes: Re-order struct uprobe_task to save some space On x86_64, with allmodconfig, struct uprobe_task is 72 bytes long, with a hole and some padding. /* size: 72, cachelines: 2, members: 7 */ /* sum members: 64, holes: 1, sum holes: 4 */ /* padding: 4 */ /* forced alignments: 1, forced holes: 1, sum forced holes: 4 */ /* last cacheline: 8 bytes */ Reorder the structure to fill the hole and avoid the padding. This way, the whole structure fits in a single cacheline and some memory is saved when it is allocated. /* size: 64, cachelines: 1, members: 7 */ /* forced alignments: 1 */ Signed-off-by: Christophe JAILLET Signed-off-by: Peter Zijlstra (Intel) Acked-by: "Masami Hiramatsu (Google)" Link: https://lore.kernel.org/r/a9f541d0cedf421f765c77a1fb93d6a979778a88.1730495562.git.christophe.jaillet@wanadoo.fr --- include/linux/uprobes.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index 7a051b5d2edd..e0a4c2082245 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -121,6 +121,9 @@ struct hprobe { struct uprobe_task { enum uprobe_task_state state; + unsigned int depth; + struct return_instance *return_instances; + union { struct { struct arch_uprobe_task autask; @@ -138,9 +141,6 @@ struct uprobe_task { unsigned long xol_vaddr; struct arch_uprobe *auprobe; - - struct return_instance *return_instances; - unsigned int depth; }; struct return_consumer { From e33ed362cf9e35db6082f7a776b7e8d557407e19 Mon Sep 17 00:00:00 2001 From: Colton Lewis Date: Wed, 13 Nov 2024 19:01:51 +0000 Subject: [PATCH 28/32] perf/arm: Drop unused functions For ARM's implementation, perf_instruction_pointer() and perf_misc_flags() are equivalent to the generic versions in include/linux/perf_event.h so arch/arm doesn't need to provide its own versions. Drop them here. 
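For reference, these are the generic fallbacks in include/linux/perf_event.h that the dropped arm definitions duplicated, shown under their pre-rename names (the following patches move them to perf_arch_*, as seen in the hunks there):

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
	(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif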
Signed-off-by: Colton Lewis Signed-off-by: Ingo Molnar Reviewed-by: Oliver Upton Acked-by: Will Deacon Link: https://lore.kernel.org/r/20241113190156.2145593-2-coltonlewis@google.com --- arch/arm/include/asm/perf_event.h | 7 ------- arch/arm/kernel/perf_callchain.c | 17 ----------------- 2 files changed, 24 deletions(-) diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index bdbc1e590891..c08f16f2e243 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h @@ -8,13 +8,6 @@ #ifndef __ARM_PERF_EVENT_H__ #define __ARM_PERF_EVENT_H__ -#ifdef CONFIG_PERF_EVENTS -struct pt_regs; -extern unsigned long perf_instruction_pointer(struct pt_regs *regs); -extern unsigned long perf_misc_flags(struct pt_regs *regs); -#define perf_misc_flags(regs) perf_misc_flags(regs) -#endif - #define perf_arch_fetch_caller_regs(regs, __ip) { \ (regs)->ARM_pc = (__ip); \ frame_pointer((regs)) = (unsigned long) __builtin_frame_address(0); \ diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c index 1d230ac9d0eb..a2601b1ef318 100644 --- a/arch/arm/kernel/perf_callchain.c +++ b/arch/arm/kernel/perf_callchain.c @@ -96,20 +96,3 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re arm_get_current_stackframe(regs, &fr); walk_stackframe(&fr, callchain_trace, entry); } - -unsigned long perf_instruction_pointer(struct pt_regs *regs) -{ - return instruction_pointer(regs); -} - -unsigned long perf_misc_flags(struct pt_regs *regs) -{ - int misc = 0; - - if (user_mode(regs)) - misc |= PERF_RECORD_MISC_USER; - else - misc |= PERF_RECORD_MISC_KERNEL; - - return misc; -} From 04782e63917dbcb60932fe93df52c4a4e3859d07 Mon Sep 17 00:00:00 2001 From: Colton Lewis Date: Wed, 13 Nov 2024 19:01:52 +0000 Subject: [PATCH 29/32] perf/core: Hoist perf_instruction_pointer() and perf_misc_flags() For clarity, rename the arch-specific definitions of these functions to perf_arch_* to denote they are arch-specific. Define the generic-named functions in one place where they can call the arch-specific ones as needed.
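Condensed from the hunks below, the override pattern this settles on looks like this (a sketch, using the x86 spelling of the arch side):

/* arch header: a real implementation defines the macro to itself */
extern unsigned long perf_arch_misc_flags(struct pt_regs *regs);
#define perf_arch_misc_flags(regs) perf_arch_misc_flags(regs)

/* generic header: supply a default only when no arch macro exists */
#ifndef perf_arch_misc_flags
# define perf_arch_misc_flags(regs) \
	(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
#endif

/*
 * generic wrapper in kernel/events/core.c: the single caller-facing
 * entry point, free to grow event-aware logic later in the series
 */
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	return perf_arch_misc_flags(regs);
}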
Signed-off-by: Colton Lewis Signed-off-by: Ingo Molnar Reviewed-by: Oliver Upton Acked-by: Thomas Richter Acked-by: Mark Rutland Acked-by: Madhavan Srinivasan Acked-by: Kan Liang Link: https://lore.kernel.org/r/20241113190156.2145593-3-coltonlewis@google.com --- arch/arm64/include/asm/perf_event.h | 6 +++--- arch/arm64/kernel/perf_callchain.c | 4 ++-- arch/powerpc/include/asm/perf_event_server.h | 6 +++--- arch/powerpc/perf/core-book3s.c | 4 ++-- arch/s390/include/asm/perf_event.h | 6 +++--- arch/s390/kernel/perf_event.c | 4 ++-- arch/x86/events/core.c | 4 ++-- arch/x86/include/asm/perf_event.h | 10 +++++----- include/linux/perf_event.h | 9 ++++++--- kernel/events/core.c | 10 ++++++++++ 10 files changed, 38 insertions(+), 25 deletions(-) diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h index eb7071c9eb34..31a5584ed423 100644 --- a/arch/arm64/include/asm/perf_event.h +++ b/arch/arm64/include/asm/perf_event.h @@ -11,9 +11,9 @@ #ifdef CONFIG_PERF_EVENTS struct pt_regs; -extern unsigned long perf_instruction_pointer(struct pt_regs *regs); -extern unsigned long perf_misc_flags(struct pt_regs *regs); -#define perf_misc_flags(regs) perf_misc_flags(regs) +extern unsigned long perf_arch_instruction_pointer(struct pt_regs *regs); +extern unsigned long perf_arch_misc_flags(struct pt_regs *regs); +#define perf_arch_misc_flags(regs) perf_misc_flags(regs) #define perf_arch_bpf_user_pt_regs(regs) ®s->user_regs #endif diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c index e8ed5673f481..01a9d08fc009 100644 --- a/arch/arm64/kernel/perf_callchain.c +++ b/arch/arm64/kernel/perf_callchain.c @@ -39,7 +39,7 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, arch_stack_walk(callchain_trace, entry, current, regs); } -unsigned long perf_instruction_pointer(struct pt_regs *regs) +unsigned long perf_arch_instruction_pointer(struct pt_regs *regs) { if (perf_guest_state()) return perf_guest_get_ip(); @@ -47,7 +47,7 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return instruction_pointer(regs); } -unsigned long perf_misc_flags(struct pt_regs *regs) +unsigned long perf_arch_misc_flags(struct pt_regs *regs) { unsigned int guest_state = perf_guest_state(); int misc = 0; diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index 5995614e9062..af0f46e2373b 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -102,8 +102,8 @@ struct power_pmu { int __init register_power_pmu(struct power_pmu *pmu); struct pt_regs; -extern unsigned long perf_misc_flags(struct pt_regs *regs); -extern unsigned long perf_instruction_pointer(struct pt_regs *regs); +extern unsigned long perf_arch_misc_flags(struct pt_regs *regs); +extern unsigned long perf_arch_instruction_pointer(struct pt_regs *regs); extern unsigned long int read_bhrb(int n); /* @@ -111,7 +111,7 @@ extern unsigned long int read_bhrb(int n); * if we have hardware PMU support. 
*/ #ifdef CONFIG_PPC_PERF_CTRS -#define perf_misc_flags(regs) perf_misc_flags(regs) +#define perf_arch_misc_flags(regs) perf_arch_misc_flags(regs) #endif /* diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 42867469752d..dc01aa604cc1 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -2332,7 +2332,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, * Called from generic code to get the misc flags (i.e. processor mode) * for an event_id. */ -unsigned long perf_misc_flags(struct pt_regs *regs) +unsigned long perf_arch_misc_flags(struct pt_regs *regs) { u32 flags = perf_get_misc_flags(regs); @@ -2346,7 +2346,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs) * Called from generic code to get the instruction pointer * for an event_id. */ -unsigned long perf_instruction_pointer(struct pt_regs *regs) +unsigned long perf_arch_instruction_pointer(struct pt_regs *regs) { unsigned long siar = mfspr(SPRN_SIAR); diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h index 66200d4a2134..8895e7d9bcf9 100644 --- a/arch/s390/include/asm/perf_event.h +++ b/arch/s390/include/asm/perf_event.h @@ -37,9 +37,9 @@ extern ssize_t cpumf_events_sysfs_show(struct device *dev, /* Perf callbacks */ struct pt_regs; -extern unsigned long perf_instruction_pointer(struct pt_regs *regs); -extern unsigned long perf_misc_flags(struct pt_regs *regs); -#define perf_misc_flags(regs) perf_misc_flags(regs) +extern unsigned long perf_arch_instruction_pointer(struct pt_regs *regs); +extern unsigned long perf_arch_misc_flags(struct pt_regs *regs); +#define perf_arch_misc_flags(regs) perf_arch_misc_flags(regs) #define perf_arch_bpf_user_pt_regs(regs) ®s->user_regs /* Perf pt_regs extension for sample-data-entry indicators */ diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c index 5fff629b1a89..f9000ab49f4a 100644 --- a/arch/s390/kernel/perf_event.c +++ b/arch/s390/kernel/perf_event.c @@ -57,7 +57,7 @@ static unsigned long instruction_pointer_guest(struct pt_regs *regs) return sie_block(regs)->gpsw.addr; } -unsigned long perf_instruction_pointer(struct pt_regs *regs) +unsigned long perf_arch_instruction_pointer(struct pt_regs *regs) { return is_in_guest(regs) ? instruction_pointer_guest(regs) : instruction_pointer(regs); @@ -84,7 +84,7 @@ static unsigned long perf_misc_flags_sf(struct pt_regs *regs) return flags; } -unsigned long perf_misc_flags(struct pt_regs *regs) +unsigned long perf_arch_misc_flags(struct pt_regs *regs) { /* Check if the cpum_sf PMU has created the pt_regs structure. * In this case, perf misc flags can be easily extracted. 
Otherwise, diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 65ab6460aed4..d19e939f3998 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -3003,7 +3003,7 @@ static unsigned long code_segment_base(struct pt_regs *regs) return 0; } -unsigned long perf_instruction_pointer(struct pt_regs *regs) +unsigned long perf_arch_instruction_pointer(struct pt_regs *regs) { if (perf_guest_state()) return perf_guest_get_ip(); @@ -3011,7 +3011,7 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return regs->ip + code_segment_base(regs); } -unsigned long perf_misc_flags(struct pt_regs *regs) +unsigned long perf_arch_misc_flags(struct pt_regs *regs) { unsigned int guest_state = perf_guest_state(); int misc = 0; diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 91b73571412f..feb87bf3d2e9 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -536,15 +536,15 @@ struct x86_perf_regs { u64 *xmm_regs; }; -extern unsigned long perf_instruction_pointer(struct pt_regs *regs); -extern unsigned long perf_misc_flags(struct pt_regs *regs); -#define perf_misc_flags(regs) perf_misc_flags(regs) +extern unsigned long perf_arch_instruction_pointer(struct pt_regs *regs); +extern unsigned long perf_arch_misc_flags(struct pt_regs *regs); +#define perf_arch_misc_flags(regs) perf_arch_misc_flags(regs) #include /* - * We abuse bit 3 from flags to pass exact information, see perf_misc_flags - * and the comment with PERF_EFLAGS_EXACT. + * We abuse bit 3 from flags to pass exact information, see + * perf_arch_misc_flags() and the comment with PERF_EFLAGS_EXACT. */ #define perf_arch_fetch_caller_regs(regs, __ip) { \ (regs)->ip = (__ip); \ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 91b310052a7c..3b4bf5e329f6 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1676,10 +1676,13 @@ extern void perf_tp_event(u16 event_type, u64 count, void *record, struct task_struct *task); extern void perf_bp_event(struct perf_event *event, void *data); -#ifndef perf_misc_flags -# define perf_misc_flags(regs) \ +extern unsigned long perf_misc_flags(struct pt_regs *regs); +extern unsigned long perf_instruction_pointer(struct pt_regs *regs); + +#ifndef perf_arch_misc_flags +# define perf_arch_misc_flags(regs) \ (user_mode(regs) ? 
PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL) -# define perf_instruction_pointer(regs) instruction_pointer(regs) +# define perf_arch_instruction_pointer(regs) instruction_pointer(regs) #endif #ifndef perf_arch_bpf_user_pt_regs # define perf_arch_bpf_user_pt_regs(regs) regs diff --git a/kernel/events/core.c b/kernel/events/core.c index 0e9cfe6f3535..6050ce033ead 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -7026,6 +7026,16 @@ void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); #endif +unsigned long perf_misc_flags(struct pt_regs *regs) +{ + return perf_arch_misc_flags(regs); +} + +unsigned long perf_instruction_pointer(struct pt_regs *regs) +{ + return perf_arch_instruction_pointer(regs); +} + static void perf_output_sample_regs(struct perf_output_handle *handle, struct pt_regs *regs, u64 mask) From 3e807cf07d96eedb57d91a91f611f828e8918aab Mon Sep 17 00:00:00 2001 From: Colton Lewis Date: Wed, 13 Nov 2024 19:01:53 +0000 Subject: [PATCH 30/32] perf/powerpc: Use perf_arch_instruction_pointer() Make sure PowerPC uses the arch-specific function now that those have been reorganized. Signed-off-by: Colton Lewis Signed-off-by: Ingo Molnar Reviewed-by: Oliver Upton Acked-by: Madhavan Srinivasan Link: https://lore.kernel.org/r/20241113190156.2145593-4-coltonlewis@google.com --- arch/powerpc/perf/callchain.c | 2 +- arch/powerpc/perf/callchain_32.c | 2 +- arch/powerpc/perf/callchain_64.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c index 6b4434dd0ff3..26aa26482c9a 100644 --- a/arch/powerpc/perf/callchain.c +++ b/arch/powerpc/perf/callchain.c @@ -51,7 +51,7 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re lr = regs->link; sp = regs->gpr[1]; - perf_callchain_store(entry, perf_instruction_pointer(regs)); + perf_callchain_store(entry, perf_arch_instruction_pointer(regs)); if (!validate_sp(sp, current)) return; diff --git a/arch/powerpc/perf/callchain_32.c b/arch/powerpc/perf/callchain_32.c index ea8cfe3806dc..ddcc2d8aa64a 100644 --- a/arch/powerpc/perf/callchain_32.c +++ b/arch/powerpc/perf/callchain_32.c @@ -139,7 +139,7 @@ void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry, long level = 0; unsigned int __user *fp, *uregs; - next_ip = perf_instruction_pointer(regs); + next_ip = perf_arch_instruction_pointer(regs); lr = regs->link; sp = regs->gpr[1]; perf_callchain_store(entry, next_ip); diff --git a/arch/powerpc/perf/callchain_64.c b/arch/powerpc/perf/callchain_64.c index 488e8a21a11e..115d1c105e8a 100644 --- a/arch/powerpc/perf/callchain_64.c +++ b/arch/powerpc/perf/callchain_64.c @@ -74,7 +74,7 @@ void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry, struct signal_frame_64 __user *sigframe; unsigned long __user *fp, *uregs; - next_ip = perf_instruction_pointer(regs); + next_ip = perf_arch_instruction_pointer(regs); lr = regs->link; sp = regs->gpr[1]; perf_callchain_store(entry, next_ip); From baff01f3d75ff3948a0465853dcaa71c394c5c46 Mon Sep 17 00:00:00 2001 From: Colton Lewis Date: Wed, 13 Nov 2024 19:01:54 +0000 Subject: [PATCH 31/32] perf/x86: Refactor misc flag assignments Break the assignment logic for misc flags into their own respective functions to reduce the complexity of the nested logic. 
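As a worked example of the decomposition (a hypothetical scenario walking the helpers in the diff below): a PEBS-exact sample arrives while a guest is running in user mode.

unsigned long example_misc_flags(struct pt_regs *regs)
{
	unsigned long flags;

	/* regs->flags has PERF_EFLAGS_EXACT set */
	flags = common_misc_flags(regs);	/* EXACT_IP */

	/* perf_guest_state() == PERF_GUEST_ACTIVE | PERF_GUEST_USER */
	flags |= guest_misc_flags(regs);	/* GUEST_USER */

	/*
	 * flags is now PERF_RECORD_MISC_EXACT_IP |
	 * PERF_RECORD_MISC_GUEST_USER, which is what
	 * perf_arch_guest_misc_flags() returns for these regs;
	 * perf_arch_misc_flags() would instead combine
	 * common_misc_flags() with host_misc_flags().
	 */
	return flags;
}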
Signed-off-by: Colton Lewis Signed-off-by: Ingo Molnar Reviewed-by: Oliver Upton Acked-by: Kan Liang Link: https://lore.kernel.org/r/20241113190156.2145593-5-coltonlewis@google.com --- arch/x86/events/core.c | 59 ++++++++++++++++++++++--------- arch/x86/include/asm/perf_event.h | 2 ++ 2 files changed, 44 insertions(+), 17 deletions(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index d19e939f3998..bfc0a35fe399 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -3011,27 +3011,52 @@ unsigned long perf_arch_instruction_pointer(struct pt_regs *regs) return regs->ip + code_segment_base(regs); } +static unsigned long common_misc_flags(struct pt_regs *regs) +{ + if (regs->flags & PERF_EFLAGS_EXACT) + return PERF_RECORD_MISC_EXACT_IP; + + return 0; +} + +static unsigned long guest_misc_flags(struct pt_regs *regs) +{ + unsigned long guest_state = perf_guest_state(); + + if (!(guest_state & PERF_GUEST_ACTIVE)) + return 0; + + if (guest_state & PERF_GUEST_USER) + return PERF_RECORD_MISC_GUEST_USER; + else + return PERF_RECORD_MISC_GUEST_KERNEL; + +} + +static unsigned long host_misc_flags(struct pt_regs *regs) +{ + if (user_mode(regs)) + return PERF_RECORD_MISC_USER; + else + return PERF_RECORD_MISC_KERNEL; +} + +unsigned long perf_arch_guest_misc_flags(struct pt_regs *regs) +{ + unsigned long flags = common_misc_flags(regs); + + flags |= guest_misc_flags(regs); + + return flags; +} + unsigned long perf_arch_misc_flags(struct pt_regs *regs) { - unsigned int guest_state = perf_guest_state(); - int misc = 0; + unsigned long flags = common_misc_flags(regs); - if (guest_state) { - if (guest_state & PERF_GUEST_USER) - misc |= PERF_RECORD_MISC_GUEST_USER; - else - misc |= PERF_RECORD_MISC_GUEST_KERNEL; - } else { - if (user_mode(regs)) - misc |= PERF_RECORD_MISC_USER; - else - misc |= PERF_RECORD_MISC_KERNEL; - } + flags |= host_misc_flags(regs); - if (regs->flags & PERF_EFLAGS_EXACT) - misc |= PERF_RECORD_MISC_EXACT_IP; - - return misc; + return flags; } void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap) diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index feb87bf3d2e9..d95f902acc52 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -538,7 +538,9 @@ struct x86_perf_regs { extern unsigned long perf_arch_instruction_pointer(struct pt_regs *regs); extern unsigned long perf_arch_misc_flags(struct pt_regs *regs); +extern unsigned long perf_arch_guest_misc_flags(struct pt_regs *regs); #define perf_arch_misc_flags(regs) perf_arch_misc_flags(regs) +#define perf_arch_guest_misc_flags(regs) perf_arch_guest_misc_flags(regs) #include From 2c47e7a74f445426d156278e339b7abb259e50de Mon Sep 17 00:00:00 2001 From: Colton Lewis Date: Wed, 13 Nov 2024 19:01:55 +0000 Subject: [PATCH 32/32] perf/core: Correct perf sampling with guest VMs Previously any PMU overflow interrupt that fired while a VCPU was loaded was recorded as a guest event whether it truly was or not. This resulted in nonsense perf recordings that did not honor perf_event_attr.exclude_guest and recorded guest IPs where it should have recorded host IPs. Rework the sampling logic to only record guest samples for events with exclude_guest = 0. This way any host-only events with exclude_guest set will never see unexpected guest samples. The behaviour of events with exclude_guest = 0 is unchanged. 
Note that events configured to sample both host and guest may still misattribute a PMI that arrived in the host as a guest event depending on KVM arch and vendor behavior. Signed-off-by: Colton Lewis Signed-off-by: Ingo Molnar Reviewed-by: Oliver Upton Acked-by: Mark Rutland Acked-by: Kan Liang Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Alexander Shishkin Cc: Namhyung Kim Link: https://lore.kernel.org/r/20241113190156.2145593-6-coltonlewis@google.com --- arch/arm64/include/asm/perf_event.h | 4 ---- arch/arm64/kernel/perf_callchain.c | 28 ---------------------------- arch/x86/events/core.c | 3 --- include/linux/perf_event.h | 21 +++++++++++++++++++-- kernel/events/core.c | 21 +++++++++++++++++---- 5 files changed, 36 insertions(+), 41 deletions(-) diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h index 31a5584ed423..ee45b4e77347 100644 --- a/arch/arm64/include/asm/perf_event.h +++ b/arch/arm64/include/asm/perf_event.h @@ -10,10 +10,6 @@ #include #ifdef CONFIG_PERF_EVENTS -struct pt_regs; -extern unsigned long perf_arch_instruction_pointer(struct pt_regs *regs); -extern unsigned long perf_arch_misc_flags(struct pt_regs *regs); -#define perf_arch_misc_flags(regs) perf_misc_flags(regs) #define perf_arch_bpf_user_pt_regs(regs) ®s->user_regs #endif diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c index 01a9d08fc009..9b7f26b128b5 100644 --- a/arch/arm64/kernel/perf_callchain.c +++ b/arch/arm64/kernel/perf_callchain.c @@ -38,31 +38,3 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, arch_stack_walk(callchain_trace, entry, current, regs); } - -unsigned long perf_arch_instruction_pointer(struct pt_regs *regs) -{ - if (perf_guest_state()) - return perf_guest_get_ip(); - - return instruction_pointer(regs); -} - -unsigned long perf_arch_misc_flags(struct pt_regs *regs) -{ - unsigned int guest_state = perf_guest_state(); - int misc = 0; - - if (guest_state) { - if (guest_state & PERF_GUEST_USER) - misc |= PERF_RECORD_MISC_GUEST_USER; - else - misc |= PERF_RECORD_MISC_GUEST_KERNEL; - } else { - if (user_mode(regs)) - misc |= PERF_RECORD_MISC_USER; - else - misc |= PERF_RECORD_MISC_KERNEL; - } - - return misc; -} diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index bfc0a35fe399..c75c482d4c52 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -3005,9 +3005,6 @@ static unsigned long code_segment_base(struct pt_regs *regs) unsigned long perf_arch_instruction_pointer(struct pt_regs *regs) { - if (perf_guest_state()) - return perf_guest_get_ip(); - return regs->ip + code_segment_base(regs); } diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 3b4bf5e329f6..cb99ec8c9e96 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1676,8 +1676,9 @@ extern void perf_tp_event(u16 event_type, u64 count, void *record, struct task_struct *task); extern void perf_bp_event(struct perf_event *event, void *data); -extern unsigned long perf_misc_flags(struct pt_regs *regs); -extern unsigned long perf_instruction_pointer(struct pt_regs *regs); +extern unsigned long perf_misc_flags(struct perf_event *event, struct pt_regs *regs); +extern unsigned long perf_instruction_pointer(struct perf_event *event, + struct pt_regs *regs); #ifndef perf_arch_misc_flags # define perf_arch_misc_flags(regs) \ @@ -1688,6 +1689,22 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs); # define perf_arch_bpf_user_pt_regs(regs) regs #endif 
+#ifndef perf_arch_guest_misc_flags +static inline unsigned long perf_arch_guest_misc_flags(struct pt_regs *regs) +{ + unsigned long guest_state = perf_guest_state(); + + if (!(guest_state & PERF_GUEST_ACTIVE)) + return 0; + + if (guest_state & PERF_GUEST_USER) + return PERF_RECORD_MISC_GUEST_USER; + else + return PERF_RECORD_MISC_GUEST_KERNEL; +} +# define perf_arch_guest_misc_flags(regs) perf_arch_guest_misc_flags(regs) +#endif + static inline bool has_branch_stack(struct perf_event *event) { return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK; diff --git a/kernel/events/core.c b/kernel/events/core.c index 6050ce033ead..1869164a4e99 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -7026,13 +7026,26 @@ void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); #endif -unsigned long perf_misc_flags(struct pt_regs *regs) +static bool should_sample_guest(struct perf_event *event) { + return !event->attr.exclude_guest && perf_guest_state(); +} + +unsigned long perf_misc_flags(struct perf_event *event, + struct pt_regs *regs) +{ + if (should_sample_guest(event)) + return perf_arch_guest_misc_flags(regs); + return perf_arch_misc_flags(regs); } -unsigned long perf_instruction_pointer(struct pt_regs *regs) +unsigned long perf_instruction_pointer(struct perf_event *event, + struct pt_regs *regs) { + if (should_sample_guest(event)) + return perf_guest_get_ip(); + return perf_arch_instruction_pointer(regs); } @@ -7853,7 +7866,7 @@ void perf_prepare_sample(struct perf_sample_data *data, __perf_event_header__init_id(data, event, filtered_sample_type); if (filtered_sample_type & PERF_SAMPLE_IP) { - data->ip = perf_instruction_pointer(regs); + data->ip = perf_instruction_pointer(event, regs); data->sample_flags |= PERF_SAMPLE_IP; } @@ -8017,7 +8030,7 @@ void perf_prepare_header(struct perf_event_header *header, { header->type = PERF_RECORD_SAMPLE; header->size = perf_sample_data_size(data, event); - header->misc = perf_misc_flags(regs); + header->misc = perf_misc_flags(event, regs); /* * If you're adding more sample types here, you likely need to do