linux/arch/csky/kernel/perf_callchain.c
Sean Christopherson ff083a2d97 perf: Protect perf_guest_cbs with RCU
Protect perf_guest_cbs with RCU to fix multiple possible errors.  Luckily,
all paths that read perf_guest_cbs already require RCU protection, e.g. to
protect the callback chains, so only the direct perf_guest_cbs touchpoints
need to be modified.

Bug #1 is a simple lack of WRITE_ONCE/READ_ONCE behavior to ensure
perf_guest_cbs isn't reloaded between a !NULL check and a dereference.
Fixed via the READ_ONCE() in rcu_dereference().
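For illustration only, a minimal sketch of the reload hazard and the
single-load fix (struct guest_cbs, guest_cbs, buggy_check() and
fixed_check() are hypothetical stand-ins, not the kernel's actual
types or helpers):

  #include <linux/compiler.h>	/* READ_ONCE() */

  /* Hypothetical stand-in for struct perf_guest_info_callbacks. */
  struct guest_cbs {
  	int (*is_in_guest)(void);
  };

  static struct guest_cbs *guest_cbs;	/* stand-in for perf_guest_cbs */

  static int buggy_check(void)
  {
  	/*
  	 * Two independent loads: the compiler may reload guest_cbs
  	 * between the !NULL check and the dereference, so a racing
  	 * unregister (guest_cbs = NULL) can fault the second load.
  	 */
  	return guest_cbs && guest_cbs->is_in_guest();
  }

  static int fixed_check(void)
  {
  	/* One load; check and dereference use the same snapshot. */
  	struct guest_cbs *cbs = READ_ONCE(guest_cbs);

  	return cbs && cbs->is_in_guest();
  }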

Bug #2 is that on weakly-ordered architectures, updates to the callbacks
themselves are not guaranteed to be visible before the pointer is made
visible to readers.  Fixed by the smp_store_release() in
rcu_assign_pointer() when the new pointer is non-NULL.
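In the fixed code this ordering comes from rcu_assign_pointer(), which
issues smp_store_release() for non-NULL stores.  A hand-rolled sketch of
just the publish side, reusing the hypothetical stand-ins from above:

  #include <asm/barrier.h>	/* smp_store_release() */

  static void publish_cbs(struct guest_cbs *cbs)
  {
  	/*
  	 * Release semantics: all prior stores initializing *cbs are
  	 * visible to readers before the non-NULL pointer is.
  	 */
  	smp_store_release(&guest_cbs, cbs);
  }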

Bug #3 is that, because the callbacks are global, it's possible for
readers to run in parallel with an unregister, and thus a module
implementing the callbacks can be unloaded while readers are in flight,
resulting in a use-after-free.  Fixed by a synchronize_rcu() call when
unregistering callbacks.
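Putting the three fixes together, the shape of the change (a standalone
sketch reusing the stand-in names from above; the real helpers live in
kernel/events/core.c and include/linux/perf_event.h):

  #include <linux/rcupdate.h>

  static struct guest_cbs __rcu *guest_cbs;	/* stand-in global */

  static int register_cbs(struct guest_cbs *cbs)
  {
  	/* Orders the callback stores before publication (bug #2). */
  	rcu_assign_pointer(guest_cbs, cbs);
  	return 0;
  }

  static int unregister_cbs(void)
  {
  	rcu_assign_pointer(guest_cbs, NULL);
  	/* Wait out in-flight readers before module unload (bug #3). */
  	synchronize_rcu();
  	return 0;
  }

  static int in_guest(void)
  {
  	/* Caller must be in an RCU read-side critical section. */
  	struct guest_cbs *cbs = rcu_dereference(guest_cbs);

  	return cbs && cbs->is_in_guest();	/* one load (bug #1) */
  }

The file below consumes the pointer through perf_get_guest_cbs(), which
wraps the rcu_dereference() shown here.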

Bug #1 escaped notice because it's extremely unlikely a compiler will
reload perf_guest_cbs in this sequence.  perf_guest_cbs does get reloaded
for future derefs, e.g. for ->is_user_mode(), but the ->is_in_guest()
guard all but guarantees the consumer will win the race, e.g. to nullify
perf_guest_cbs, KVM has to completely exit the guest and tear down all
VMs before KVM starts its module unload / unregister sequence.  This
also makes it all but impossible to encounter bug #3.

Bug #2 has not been a problem because all architectures that register
callbacks are strongly ordered and/or have a static set of callbacks.

But with help, unloading kvm_intel can trigger bug #1: wrapping
perf_guest_cbs with READ_ONCE in perf_misc_flags() while spamming
kvm_intel module load/unload leads to:

  BUG: kernel NULL pointer dereference, address: 0000000000000000
  #PF: supervisor read access in kernel mode
  #PF: error_code(0x0000) - not-present page
  PGD 0 P4D 0
  Oops: 0000 [#1] PREEMPT SMP
  CPU: 6 PID: 1825 Comm: stress Not tainted 5.14.0-rc2+ #459
  Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
  RIP: 0010:perf_misc_flags+0x1c/0x70
  Call Trace:
   perf_prepare_sample+0x53/0x6b0
   perf_event_output_forward+0x67/0x160
   __perf_event_overflow+0x52/0xf0
   handle_pmi_common+0x207/0x300
   intel_pmu_handle_irq+0xcf/0x410
   perf_event_nmi_handler+0x28/0x50
   nmi_handle+0xc7/0x260
   default_do_nmi+0x6b/0x170
   exc_nmi+0x103/0x130
   asm_exc_nmi+0x76/0xbf

Fixes: 39447b386c ("perf: Enhance perf to allow for guest statistic collection from host")
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20211111020738.2512932-2-seanjc@google.com
2021-11-17 14:49:06 +01:00


// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/perf_event.h>
#include <linux/uaccess.h>

/* Kernel callchain */
struct stackframe {
	unsigned long fp;
	unsigned long lr;
};

static int unwind_frame_kernel(struct stackframe *frame)
{
	unsigned long low = (unsigned long)task_stack_page(current);
	unsigned long high = low + THREAD_SIZE;

	if (unlikely(frame->fp < low || frame->fp > high))
		return -EPERM;

	if (kstack_end((void *)frame->fp) || frame->fp & 0x3)
		return -EPERM;

	*frame = *(struct stackframe *)frame->fp;

	if (__kernel_text_address(frame->lr)) {
		int graph = 0;

		frame->lr = ftrace_graph_ret_addr(NULL, &graph, frame->lr,
						  NULL);
	}
	return 0;
}

static void notrace walk_stackframe(struct stackframe *fr,
				    struct perf_callchain_entry_ctx *entry)
{
	do {
		perf_callchain_store(entry, fr->lr);
	} while (unwind_frame_kernel(fr) >= 0);
}

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
				    unsigned long fp, unsigned long reg_lr)
{
	struct stackframe buftail;
	unsigned long lr = 0;
	unsigned long *user_frame_tail = (unsigned long *)fp;

	/* Check accessibility of one struct frame_tail beyond */
	if (!access_ok(user_frame_tail, sizeof(buftail)))
		return 0;

	if (__copy_from_user_inatomic(&buftail, user_frame_tail,
				      sizeof(buftail)))
		return 0;

	if (reg_lr != 0)
		lr = reg_lr;
	else
		lr = buftail.lr;

	fp = buftail.fp;
	perf_callchain_store(entry, lr);

	return fp;
}
/*
 * This will be called when the target is in user mode.  It is only
 * called when "PERF_SAMPLE_CALLCHAIN" is used in
 * kernel/events/core.c:perf_prepare_sample().
 *
 * How to trigger perf_callchain_[user/kernel]:
 * $ perf record -e cpu-clock --call-graph fp ./program
 * $ perf report --call-graph
 *
 * On the C-SKY platform, the program being sampled and the C library
 * need to be compiled with -mbacktrace, otherwise the user
 * stack will not contain function frames.
 */
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
			 struct pt_regs *regs)
{
	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
	unsigned long fp = 0;

	/* C-SKY does not support virtualization. */
	if (guest_cbs && guest_cbs->is_in_guest())
		return;

	fp = regs->regs[4];
	perf_callchain_store(entry, regs->pc);
	/*
	 * When backtracing from a leaf function, lr is normally not
	 * saved in the frame on C-SKY, so take lr from pt_regs at the
	 * sample point.  However, the lr value can be incorrect if lr
	 * is used as a temporary register.
	 */
	fp = user_backtrace(entry, fp, regs->lr);

	while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
		fp = user_backtrace(entry, fp, 0);
}

void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			   struct pt_regs *regs)
{
	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
	struct stackframe fr;

	/* C-SKY does not support virtualization. */
	if (guest_cbs && guest_cbs->is_in_guest()) {
		pr_warn("C-SKY does not support perf in guest mode!");
		return;
	}

	fr.fp = regs->regs[4];
	fr.lr = regs->lr;
	walk_stackframe(&fr, entry);
}