mirror of
https://github.com/torvalds/linux.git
synced 2024-12-04 01:51:34 +00:00
54c3931957
Currently, the arguments passed to lockdep_hardirqs_{on,off} are fixed
to CALLER_ADDR0.
The functions trace_hardirqs_{on,off}_caller are intended to use
caller_addr to represent the address that the caller wants to be traced.
For example, the lockdep log on riscv always showed the last
{enabled,disabled} events at __trace_hardirqs_{on,off} (when called through them):
[ 57.853175] hardirqs last enabled at (2519): __trace_hardirqs_on+0xc/0x14
[ 57.853848] hardirqs last disabled at (2520): __trace_hardirqs_off+0xc/0x14
After passing caller_addr through trace_hardirqs_{on,off}_caller, we get more useful information:
[ 53.781428] hardirqs last enabled at (2595): restore_all+0xe/0x66
[ 53.782185] hardirqs last disabled at (2596): ret_from_exception+0xa/0x10
Link: https://lkml.kernel.org/r/20220901104515.135162-2-zouyipeng@huawei.com
Cc: stable@vger.kernel.org
Fixes: c3bc8fd637
("tracing: Centralize preemptirq tracepoints and unify their usage")
Signed-off-by: Yipeng Zou <zouyipeng@huawei.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
134 lines
3.6 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
/*
|
|
* preemptoff and irqoff tracepoints
|
|
*
|
|
* Copyright (C) Joel Fernandes (Google) <joel@joelfernandes.org>
|
|
*/
|
|
|
|
#include <linux/kallsyms.h>
|
|
#include <linux/uaccess.h>
|
|
#include <linux/module.h>
|
|
#include <linux/ftrace.h>
|
|
#include <linux/kprobes.h>
|
|
#include "trace.h"
|
|
|
|
#define CREATE_TRACE_POINTS
|
|
#include <trace/events/preemptirq.h>
|
|
|
|
#ifdef CONFIG_TRACE_IRQFLAGS
|
|
/* Per-cpu variable to prevent redundant calls when IRQs already off */
|
|
static DEFINE_PER_CPU(int, tracing_irq_cpu);
|
|
|
|
/*
|
|
* Like trace_hardirqs_on() but without the lockdep invocation. This is
|
|
* used in the low level entry code where the ordering vs. RCU is important
|
|
* and lockdep uses a staged approach which splits the lockdep hardirq
|
|
* tracking into a RCU on and a RCU off section.
|
|
*/
|
|
void trace_hardirqs_on_prepare(void)
|
|
{
|
|
if (this_cpu_read(tracing_irq_cpu)) {
|
|
if (!in_nmi())
|
|
trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
|
|
tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
|
|
this_cpu_write(tracing_irq_cpu, 0);
|
|
}
|
|
}
|
|
EXPORT_SYMBOL(trace_hardirqs_on_prepare);
|
|
NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);
|
|
|
|
/*
 * trace_hardirqs_on - record an IRQ-enable event for tracing and lockdep.
 *
 * Emits the irq_enable tracepoint and notifies the irqsoff tracer only
 * when the per-CPU guard is armed (i.e. a prior "off" was recorded),
 * then unconditionally informs lockdep. The trace-then-lockdep order
 * mirrors the staged approach described above — do not reorder.
 */
void trace_hardirqs_on(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		/* Tracepoint skipped in NMI context; tracer callback still runs. */
		if (!in_nmi())
			trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		/* Disarm the guard so redundant enables are not re-reported. */
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on_prepare();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
NOKPROBE_SYMBOL(trace_hardirqs_on);
|
|
|
|
/*
|
|
* Like trace_hardirqs_off() but without the lockdep invocation. This is
|
|
* used in the low level entry code where the ordering vs. RCU is important
|
|
* and lockdep uses a staged approach which splits the lockdep hardirq
|
|
* tracking into a RCU on and a RCU off section.
|
|
*/
|
|
void trace_hardirqs_off_finish(void)
|
|
{
|
|
if (!this_cpu_read(tracing_irq_cpu)) {
|
|
this_cpu_write(tracing_irq_cpu, 1);
|
|
tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
|
|
if (!in_nmi())
|
|
trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
|
|
}
|
|
|
|
}
|
|
EXPORT_SYMBOL(trace_hardirqs_off_finish);
|
|
NOKPROBE_SYMBOL(trace_hardirqs_off_finish);
|
|
|
|
void trace_hardirqs_off(void)
|
|
{
|
|
lockdep_hardirqs_off(CALLER_ADDR0);
|
|
|
|
if (!this_cpu_read(tracing_irq_cpu)) {
|
|
this_cpu_write(tracing_irq_cpu, 1);
|
|
tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
|
|
if (!in_nmi())
|
|
trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
|
|
}
|
|
}
|
|
EXPORT_SYMBOL(trace_hardirqs_off);
|
|
NOKPROBE_SYMBOL(trace_hardirqs_off);
|
|
|
|
/*
 * trace_hardirqs_on_caller - like trace_hardirqs_on() but with an explicit
 * caller address.
 * @caller_addr: address of the site that actually enabled IRQs, used when
 *               the immediate caller (CALLER_ADDR0) is just a stub -- e.g.
 *               architecture entry code wrappers.
 */
__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		/* Tracepoint skipped in NMI context; tracer callback still runs. */
		if (!in_nmi())
			trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
		tracer_hardirqs_on(CALLER_ADDR0, caller_addr);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on_prepare();
	/* Report the real enable site to lockdep, not this wrapper. */
	lockdep_hardirqs_on(caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);
NOKPROBE_SYMBOL(trace_hardirqs_on_caller);
|
|
|
|
__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
|
|
{
|
|
lockdep_hardirqs_off(caller_addr);
|
|
|
|
if (!this_cpu_read(tracing_irq_cpu)) {
|
|
this_cpu_write(tracing_irq_cpu, 1);
|
|
tracer_hardirqs_off(CALLER_ADDR0, caller_addr);
|
|
if (!in_nmi())
|
|
trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
|
|
}
|
|
}
|
|
EXPORT_SYMBOL(trace_hardirqs_off_caller);
|
|
NOKPROBE_SYMBOL(trace_hardirqs_off_caller);
|
|
#endif /* CONFIG_TRACE_IRQFLAGS */
|
|
|
|
#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
|
|
|
|
/*
 * trace_preempt_on - record a preemption-enable event.
 * @a0: presumably the ip of the enable site (matches the CALLER_ADDR0
 *      pattern used above) -- confirm against callers.
 * @a1: presumably the parent ip -- confirm against callers.
 *
 * Fires the preempt_enable tracepoint (skipped in NMI context) and then
 * notifies the preemptoff tracer.
 */
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (!in_nmi())
		trace_preempt_enable_rcuidle(a0, a1);
	tracer_preempt_on(a0, a1);
}
|
|
|
|
/*
 * trace_preempt_off - record a preemption-disable event.
 * @a0: presumably the ip of the disable site (matches the CALLER_ADDR0
 *      pattern used above) -- confirm against callers.
 * @a1: presumably the parent ip -- confirm against callers.
 *
 * Fires the preempt_disable tracepoint (skipped in NMI context) and then
 * notifies the preemptoff tracer.
 */
void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (!in_nmi())
		trace_preempt_disable_rcuidle(a0, a1);
	tracer_preempt_off(a0, a1);
}
|
|
#endif
|