Tracing updates for 4.15:

 - Now allow module init functions to be traced

 - Clean up some events that are unused or not needed with certain configs (saves space)

 - Clean up of trace histogram code

 - Add support for preempt and interrupt enabled/disable events

 - Other various clean ups
 -----BEGIN PGP SIGNATURE-----
 
 iQHIBAABCgAyFiEEPm6V/WuN2kyArTUe1a05Y9njSUkFAloPGgkUHHJvc3RlZHRA
 Z29vZG1pcy5vcmcACgkQ1a05Y9njSUmfaAwAjge5FWBCBQeby8tVuw4RGAorRgl5
 IFuijFSygcKRMhQFP6B+haHsezeCbNaBBtIncXhoJGDC5XuhUhr9foYf1SChEmYp
 tCOK2o71FgZ8yG539IYCVjG9cJZxPLM0OI7RQ8hcMETAr+eiXPXxHrmrm9kdBtYM
 ZAQERvqI5yu2HWIb87KBc38H0rgYrOJKZt9Rx20as/aqAME7hFvYErFlcnxdmHo+
 LmovJOQBCTicNJ4TXJc418JaUWi9cm/A3uhW3o5aLMoRAxCc/8FD+dq2rg4qlHDH
 tOtK6pwIPHfqRZ3nMLXXWhaa+w+swsxBOnegkvgP2xCyibKjFgh9kzcpaj41w3x1
 0FCfvS7flx9ob//fAB8kxLvJyY5p3Qp3xdvj0+gp2qa3Ga5lSqcMzS419TLY1Yfa
 Jpi2oAagDqP94m0EjAGTkhZMOrsFIDr49g3h7nqz3T3Z54luyXniDoYoO11d+dUF
 vCUiIJz/PsQIE3NVViZiaRtcLVXneLHISmnz
 =h3F2
 -----END PGP SIGNATURE-----

Merge tag 'trace-v4.15' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:

 - allow module init functions to be traced

 - clean up some events that are unused or not needed with certain configs (saves space)

 - clean up of trace histogram code

 - add support for preempt and interrupt enabled/disable events

 - other various clean ups

* tag 'trace-v4.15' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (30 commits)
  tracing, thermal: Hide cpu cooling trace events when not in use
  tracing, thermal: Hide devfreq trace events when not in use
  ftrace: Kill FTRACE_OPS_FL_PER_CPU
  perf/ftrace: Small cleanup
  perf/ftrace: Fix function trace events
  perf/ftrace: Revert ("perf/ftrace: Fix double traces of perf on ftrace:function")
  tracing, dma-buf: Remove unused trace event dma_fence_annotate_wait_on
  tracing, memcg, vmscan: Hide trace events when not in use
  tracing/xen: Hide events that are not used when X86_PAE is not defined
  tracing: mark trace_test_buffer as __maybe_unused
  printk: Remove superfluous memory barriers from printk_safe
  ftrace: Clear hashes of stale ips of init memory
  tracing: Add support for preempt and irq enable/disable events
  tracing: Prepare to add preempt and irq trace events
  ftrace/kallsyms: Have /proc/kallsyms show saved mod init functions
  ftrace: Add freeing algorithm to free ftrace_mod_maps
  ftrace: Save module init functions kallsyms symbols for tracing
  ftrace: Allow module init functions to be traced
  ftrace: Add a ftrace_free_mem() function for modules to use
  tracing: Reimplement log2
  ...
Linus Torvalds committed 2017-11-17 14:58:01 -08:00
commit 2dcd9c71c1
32 changed files with 903 additions and 518 deletions

View File

@ -27,7 +27,6 @@
#define CREATE_TRACE_POINTS #define CREATE_TRACE_POINTS
#include <trace/events/dma_fence.h> #include <trace/events/dma_fence.h>
EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit); EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal); EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);

View File

@ -52,6 +52,30 @@ static inline void early_trace_init(void) { }
struct module; struct module;
struct ftrace_hash; struct ftrace_hash;
#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
defined(CONFIG_DYNAMIC_FTRACE)
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym);
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
char *type, char *name,
char *module_name, int *exported);
#else
static inline const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym)
{
return NULL;
}
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
char *type, char *name,
char *module_name, int *exported)
{
return -1;
}
#endif
#ifdef CONFIG_FUNCTION_TRACER #ifdef CONFIG_FUNCTION_TRACER
extern int ftrace_enabled; extern int ftrace_enabled;
@ -79,10 +103,6 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
* ENABLED - set/unset when ftrace_ops is registered/unregistered * ENABLED - set/unset when ftrace_ops is registered/unregistered
* DYNAMIC - set when ftrace_ops is registered to denote dynamically * DYNAMIC - set when ftrace_ops is registered to denote dynamically
* allocated ftrace_ops which need special care * allocated ftrace_ops which need special care
* PER_CPU - set manualy by ftrace_ops user to denote the ftrace_ops
* could be controlled by following calls:
* ftrace_function_local_enable
* ftrace_function_local_disable
* SAVE_REGS - The ftrace_ops wants regs saved at each function called * SAVE_REGS - The ftrace_ops wants regs saved at each function called
* and passed to the callback. If this flag is set, but the * and passed to the callback. If this flag is set, but the
* architecture does not support passing regs * architecture does not support passing regs
@ -126,21 +146,20 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
enum { enum {
FTRACE_OPS_FL_ENABLED = 1 << 0, FTRACE_OPS_FL_ENABLED = 1 << 0,
FTRACE_OPS_FL_DYNAMIC = 1 << 1, FTRACE_OPS_FL_DYNAMIC = 1 << 1,
FTRACE_OPS_FL_PER_CPU = 1 << 2, FTRACE_OPS_FL_SAVE_REGS = 1 << 2,
FTRACE_OPS_FL_SAVE_REGS = 1 << 3, FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 3,
FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 4, FTRACE_OPS_FL_RECURSION_SAFE = 1 << 4,
FTRACE_OPS_FL_RECURSION_SAFE = 1 << 5, FTRACE_OPS_FL_STUB = 1 << 5,
FTRACE_OPS_FL_STUB = 1 << 6, FTRACE_OPS_FL_INITIALIZED = 1 << 6,
FTRACE_OPS_FL_INITIALIZED = 1 << 7, FTRACE_OPS_FL_DELETED = 1 << 7,
FTRACE_OPS_FL_DELETED = 1 << 8, FTRACE_OPS_FL_ADDING = 1 << 8,
FTRACE_OPS_FL_ADDING = 1 << 9, FTRACE_OPS_FL_REMOVING = 1 << 9,
FTRACE_OPS_FL_REMOVING = 1 << 10, FTRACE_OPS_FL_MODIFYING = 1 << 10,
FTRACE_OPS_FL_MODIFYING = 1 << 11, FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 11,
FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12, FTRACE_OPS_FL_IPMODIFY = 1 << 12,
FTRACE_OPS_FL_IPMODIFY = 1 << 13, FTRACE_OPS_FL_PID = 1 << 13,
FTRACE_OPS_FL_PID = 1 << 14, FTRACE_OPS_FL_RCU = 1 << 14,
FTRACE_OPS_FL_RCU = 1 << 15, FTRACE_OPS_FL_TRACE_ARRAY = 1 << 15,
FTRACE_OPS_FL_TRACE_ARRAY = 1 << 16,
}; };
#ifdef CONFIG_DYNAMIC_FTRACE #ifdef CONFIG_DYNAMIC_FTRACE
@ -152,8 +171,10 @@ struct ftrace_ops_hash {
}; };
void ftrace_free_init_mem(void); void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else #else
static inline void ftrace_free_init_mem(void) { } static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif #endif
/* /*
@ -173,7 +194,6 @@ struct ftrace_ops {
unsigned long flags; unsigned long flags;
void *private; void *private;
ftrace_func_t saved_func; ftrace_func_t saved_func;
int __percpu *disabled;
#ifdef CONFIG_DYNAMIC_FTRACE #ifdef CONFIG_DYNAMIC_FTRACE
struct ftrace_ops_hash local_hash; struct ftrace_ops_hash local_hash;
struct ftrace_ops_hash *func_hash; struct ftrace_ops_hash *func_hash;
@ -205,55 +225,6 @@ int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops); int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void); void clear_ftrace_function(void);
/**
* ftrace_function_local_enable - enable ftrace_ops on current cpu
*
* This function enables tracing on current cpu by decreasing
* the per cpu control variable.
* It must be called with preemption disabled and only on ftrace_ops
* registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
* disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
*/
static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
{
if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
return;
(*this_cpu_ptr(ops->disabled))--;
}
/**
* ftrace_function_local_disable - disable ftrace_ops on current cpu
*
* This function disables tracing on current cpu by increasing
* the per cpu control variable.
* It must be called with preemption disabled and only on ftrace_ops
* registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
* disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
*/
static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
{
if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
return;
(*this_cpu_ptr(ops->disabled))++;
}
/**
* ftrace_function_local_disabled - returns ftrace_ops disabled value
* on current cpu
*
* This function returns value of ftrace_ops::disabled on current cpu.
* It must be called with preemption disabled and only on ftrace_ops
* registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
* disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
*/
static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
{
WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU));
return *this_cpu_ptr(ops->disabled);
}
extern void ftrace_stub(unsigned long a0, unsigned long a1, extern void ftrace_stub(unsigned long a0, unsigned long a1,
struct ftrace_ops *op, struct pt_regs *regs); struct ftrace_ops *op, struct pt_regs *regs);
@ -271,6 +242,7 @@ static inline int ftrace_nr_registered_ops(void)
static inline void clear_ftrace_function(void) { } static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { } static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { } static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif /* CONFIG_FUNCTION_TRACER */ #endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_STACK_TRACER #ifdef CONFIG_STACK_TRACER
@ -743,7 +715,8 @@ static inline unsigned long get_lock_parent_ip(void)
static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { } static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif #endif
#ifdef CONFIG_PREEMPT_TRACER #if defined(CONFIG_PREEMPT_TRACER) || \
(defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
extern void trace_preempt_on(unsigned long a0, unsigned long a1); extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1); extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else #else
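
To put the ftrace_ops changes above in context (the PER_CPU flag and the per-cpu ->disabled counter are removed, and the remaining flag values are renumbered), here is a minimal sketch of how a kernel module registers a function-trace callback with this API. The callback, filter target, and module boilerplate are illustrative assumptions, not part of this series; only register_ftrace_function(), unregister_ftrace_function(), ftrace_set_filter(), and FTRACE_OPS_FL_RECURSION_SAFE come from the existing interface.

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/string.h>

/* Illustrative callback: runs for every traced function matching the filter. */
static void notrace example_trace_callback(unsigned long ip,
                                           unsigned long parent_ip,
                                           struct ftrace_ops *op,
                                           struct pt_regs *regs)
{
        /* Keep this cheap and re-entrant; no sleeping here. */
}

static struct ftrace_ops example_ops = {
        .func  = example_trace_callback,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static int __init example_trace_init(void)
{
        /* Hypothetical filter: only trace vfs_read(). */
        ftrace_set_filter(&example_ops, "vfs_read", strlen("vfs_read"), 1);
        return register_ftrace_function(&example_ops);
}

static void __exit example_trace_exit(void)
{
        unregister_ftrace_function(&example_ops);
}

module_init(example_trace_init);
module_exit(example_trace_exit);
MODULE_LICENSE("GPL");

With this series applied, a callback registered this way can also hit a module's __init functions while they run, since module init code is no longer marked notrace wholesale (see the __init/__inittrace hunk in the next file below).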

View File

@ -40,7 +40,7 @@
/* These are for everybody (although not all archs will actually /* These are for everybody (although not all archs will actually
discard it in modules) */ discard it in modules) */
#define __init __section(.init.text) __cold __inittrace __latent_entropy #define __init __section(.init.text) __cold __latent_entropy
#define __initdata __section(.init.data) #define __initdata __section(.init.data)
#define __initconst __section(.init.rodata) #define __initconst __section(.init.rodata)
#define __exitdata __section(.exit.data) #define __exitdata __section(.exit.data)
@ -69,10 +69,8 @@
#ifdef MODULE #ifdef MODULE
#define __exitused #define __exitused
#define __inittrace notrace
#else #else
#define __exitused __used #define __exitused __used
#define __inittrace
#endif #endif
#define __exit __section(.exit.text) __exitused __cold notrace #define __exit __section(.exit.text) __exitused __cold notrace

View File

@ -1169,7 +1169,7 @@ extern void perf_event_init(void);
extern void perf_tp_event(u16 event_type, u64 count, void *record, extern void perf_tp_event(u16 event_type, u64 count, void *record,
int entry_size, struct pt_regs *regs, int entry_size, struct pt_regs *regs,
struct hlist_head *head, int rctx, struct hlist_head *head, int rctx,
struct task_struct *task, struct perf_event *event); struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data); extern void perf_bp_event(struct perf_event *event, void *data);
#ifndef perf_misc_flags #ifndef perf_misc_flags

View File

@ -174,6 +174,11 @@ enum trace_reg {
TRACE_REG_PERF_UNREGISTER, TRACE_REG_PERF_UNREGISTER,
TRACE_REG_PERF_OPEN, TRACE_REG_PERF_OPEN,
TRACE_REG_PERF_CLOSE, TRACE_REG_PERF_CLOSE,
/*
* These (ADD/DEL) use a 'boolean' return value, where 1 (true) means a
* custom action was taken and the default action is not to be
* performed.
*/
TRACE_REG_PERF_ADD, TRACE_REG_PERF_ADD,
TRACE_REG_PERF_DEL, TRACE_REG_PERF_DEL,
#endif #endif
@ -542,9 +547,9 @@ void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
static inline void static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type, perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
u64 count, struct pt_regs *regs, void *head, u64 count, struct pt_regs *regs, void *head,
struct task_struct *task, struct perf_event *event) struct task_struct *task)
{ {
perf_tp_event(type, count, raw_data, size, regs, head, rctx, task, event); perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
} }
#endif #endif

View File

@ -9,46 +9,6 @@
struct dma_fence; struct dma_fence;
TRACE_EVENT(dma_fence_annotate_wait_on,
/* fence: the fence waiting on f1, f1: the fence to be waited on. */
TP_PROTO(struct dma_fence *fence, struct dma_fence *f1),
TP_ARGS(fence, f1),
TP_STRUCT__entry(
__string(driver, fence->ops->get_driver_name(fence))
__string(timeline, fence->ops->get_timeline_name(fence))
__field(unsigned int, context)
__field(unsigned int, seqno)
__string(waiting_driver, f1->ops->get_driver_name(f1))
__string(waiting_timeline, f1->ops->get_timeline_name(f1))
__field(unsigned int, waiting_context)
__field(unsigned int, waiting_seqno)
),
TP_fast_assign(
__assign_str(driver, fence->ops->get_driver_name(fence))
__assign_str(timeline, fence->ops->get_timeline_name(fence))
__entry->context = fence->context;
__entry->seqno = fence->seqno;
__assign_str(waiting_driver, f1->ops->get_driver_name(f1))
__assign_str(waiting_timeline, f1->ops->get_timeline_name(f1))
__entry->waiting_context = f1->context;
__entry->waiting_seqno = f1->seqno;
),
TP_printk("driver=%s timeline=%s context=%u seqno=%u " \
"waits on driver=%s timeline=%s context=%u seqno=%u",
__get_str(driver), __get_str(timeline), __entry->context,
__entry->seqno,
__get_str(waiting_driver), __get_str(waiting_timeline),
__entry->waiting_context, __entry->waiting_seqno)
);
DECLARE_EVENT_CLASS(dma_fence, DECLARE_EVENT_CLASS(dma_fence,
TP_PROTO(struct dma_fence *fence), TP_PROTO(struct dma_fence *fence),

View File

@ -0,0 +1,70 @@
#ifdef CONFIG_PREEMPTIRQ_EVENTS
#undef TRACE_SYSTEM
#define TRACE_SYSTEM preemptirq
#if !defined(_TRACE_PREEMPTIRQ_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_PREEMPTIRQ_H
#include <linux/ktime.h>
#include <linux/tracepoint.h>
#include <linux/string.h>
#include <asm/sections.h>
DECLARE_EVENT_CLASS(preemptirq_template,
TP_PROTO(unsigned long ip, unsigned long parent_ip),
TP_ARGS(ip, parent_ip),
TP_STRUCT__entry(
__field(u32, caller_offs)
__field(u32, parent_offs)
),
TP_fast_assign(
__entry->caller_offs = (u32)(ip - (unsigned long)_stext);
__entry->parent_offs = (u32)(parent_ip - (unsigned long)_stext);
),
TP_printk("caller=%pF parent=%pF",
(void *)((unsigned long)(_stext) + __entry->caller_offs),
(void *)((unsigned long)(_stext) + __entry->parent_offs))
);
#ifndef CONFIG_PROVE_LOCKING
DEFINE_EVENT(preemptirq_template, irq_disable,
TP_PROTO(unsigned long ip, unsigned long parent_ip),
TP_ARGS(ip, parent_ip));
DEFINE_EVENT(preemptirq_template, irq_enable,
TP_PROTO(unsigned long ip, unsigned long parent_ip),
TP_ARGS(ip, parent_ip));
#endif
#ifdef CONFIG_DEBUG_PREEMPT
DEFINE_EVENT(preemptirq_template, preempt_disable,
TP_PROTO(unsigned long ip, unsigned long parent_ip),
TP_ARGS(ip, parent_ip));
DEFINE_EVENT(preemptirq_template, preempt_enable,
TP_PROTO(unsigned long ip, unsigned long parent_ip),
TP_ARGS(ip, parent_ip));
#endif
#endif /* _TRACE_PREEMPTIRQ_H */
#include <trace/define_trace.h>
#else /* !CONFIG_PREEMPTIRQ_EVENTS */
#define trace_irq_enable(...)
#define trace_irq_disable(...)
#define trace_preempt_enable(...)
#define trace_preempt_disable(...)
#define trace_irq_enable_rcuidle(...)
#define trace_irq_disable_rcuidle(...)
#define trace_preempt_enable_rcuidle(...)
#define trace_preempt_disable_rcuidle(...)
#endif
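
A hedged sketch of how the new preemptirq tracepoints declared above are meant to be emitted. The hook functions here are invented for illustration; in the actual series the calls are made from the irqsoff/preempt tracing code (not shown in this excerpt). CALLER_ADDR0/CALLER_ADDR1 come from <linux/ftrace.h>, and the _rcuidle variants are the standard tracepoint forms that are safe to call when RCU is not watching.

#include <linux/ftrace.h>               /* CALLER_ADDR0 / CALLER_ADDR1 */
#include <trace/events/preemptirq.h>    /* the header added above */

/* Illustrative hook: record the site where interrupts were just disabled. */
static inline void example_hardirqs_off_hook(void)
{
        trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
}

/* Illustrative hook: record the site where preemption was just disabled. */
static inline void example_preempt_disable_hook(unsigned long ip,
                                                unsigned long parent_ip)
{
        trace_preempt_disable(ip, parent_ip);
}

Note that the irq_disable/irq_enable events are only defined when PROVE_LOCKING is off, and the preempt_disable/preempt_enable events only when DEBUG_PREEMPT is on, matching the Kconfig dependency added further below.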

View File

@ -91,6 +91,7 @@ TRACE_EVENT(thermal_zone_trip,
show_tzt_type(__entry->trip_type)) show_tzt_type(__entry->trip_type))
); );
#ifdef CONFIG_CPU_THERMAL
TRACE_EVENT(thermal_power_cpu_get_power, TRACE_EVENT(thermal_power_cpu_get_power,
TP_PROTO(const struct cpumask *cpus, unsigned long freq, u32 *load, TP_PROTO(const struct cpumask *cpus, unsigned long freq, u32 *load,
size_t load_len, u32 dynamic_power, u32 static_power), size_t load_len, u32 dynamic_power, u32 static_power),
@ -148,7 +149,9 @@ TRACE_EVENT(thermal_power_cpu_limit,
__get_bitmask(cpumask), __entry->freq, __entry->cdev_state, __get_bitmask(cpumask), __entry->freq, __entry->cdev_state,
__entry->power) __entry->power)
); );
#endif /* CONFIG_CPU_THERMAL */
#ifdef CONFIG_DEVFREQ_THERMAL
TRACE_EVENT(thermal_power_devfreq_get_power, TRACE_EVENT(thermal_power_devfreq_get_power,
TP_PROTO(struct thermal_cooling_device *cdev, TP_PROTO(struct thermal_cooling_device *cdev,
struct devfreq_dev_status *status, unsigned long freq, struct devfreq_dev_status *status, unsigned long freq,
@ -204,6 +207,7 @@ TRACE_EVENT(thermal_power_devfreq_limit,
__get_str(type), __entry->freq, __entry->cdev_state, __get_str(type), __entry->freq, __entry->cdev_state,
__entry->power) __entry->power)
); );
#endif /* CONFIG_DEVFREQ_THERMAL */
#endif /* _TRACE_THERMAL_H */ #endif /* _TRACE_THERMAL_H */
/* This part must be outside protection */ /* This part must be outside protection */

View File

@ -134,6 +134,7 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_direct_reclaim_b
TP_ARGS(order, may_writepage, gfp_flags, classzone_idx) TP_ARGS(order, may_writepage, gfp_flags, classzone_idx)
); );
#ifdef CONFIG_MEMCG
DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_reclaim_begin, DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_reclaim_begin,
TP_PROTO(int order, int may_writepage, gfp_t gfp_flags, int classzone_idx), TP_PROTO(int order, int may_writepage, gfp_t gfp_flags, int classzone_idx),
@ -147,6 +148,7 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_softlimit_
TP_ARGS(order, may_writepage, gfp_flags, classzone_idx) TP_ARGS(order, may_writepage, gfp_flags, classzone_idx)
); );
#endif /* CONFIG_MEMCG */
DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_end_template, DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_end_template,
@ -172,6 +174,7 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_direct_reclaim_end
TP_ARGS(nr_reclaimed) TP_ARGS(nr_reclaimed)
); );
#ifdef CONFIG_MEMCG
DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_reclaim_end, DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_reclaim_end,
TP_PROTO(unsigned long nr_reclaimed), TP_PROTO(unsigned long nr_reclaimed),
@ -185,6 +188,7 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_re
TP_ARGS(nr_reclaimed) TP_ARGS(nr_reclaimed)
); );
#endif /* CONFIG_MEMCG */
TRACE_EVENT(mm_shrink_slab_start, TRACE_EVENT(mm_shrink_slab_start,
TP_PROTO(struct shrinker *shr, struct shrink_control *sc, TP_PROTO(struct shrinker *shr, struct shrink_control *sc,

View File

@ -148,7 +148,6 @@ DECLARE_EVENT_CLASS(xen_mmu__set_pte,
TP_ARGS(ptep, pteval)) TP_ARGS(ptep, pteval))
DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte); DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte);
DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte_atomic);
TRACE_EVENT(xen_mmu_set_pte_at, TRACE_EVENT(xen_mmu_set_pte_at,
TP_PROTO(struct mm_struct *mm, unsigned long addr, TP_PROTO(struct mm_struct *mm, unsigned long addr,
@ -170,21 +169,6 @@ TRACE_EVENT(xen_mmu_set_pte_at,
(int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval) (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
); );
TRACE_EVENT(xen_mmu_pte_clear,
TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep),
TP_ARGS(mm, addr, ptep),
TP_STRUCT__entry(
__field(struct mm_struct *, mm)
__field(unsigned long, addr)
__field(pte_t *, ptep)
),
TP_fast_assign(__entry->mm = mm;
__entry->addr = addr;
__entry->ptep = ptep),
TP_printk("mm %p addr %lx ptep %p",
__entry->mm, __entry->addr, __entry->ptep)
);
TRACE_DEFINE_SIZEOF(pmdval_t); TRACE_DEFINE_SIZEOF(pmdval_t);
TRACE_EVENT(xen_mmu_set_pmd, TRACE_EVENT(xen_mmu_set_pmd,
@ -202,6 +186,24 @@ TRACE_EVENT(xen_mmu_set_pmd,
(int)sizeof(pmdval_t) * 2, (unsigned long long)__entry->pmdval) (int)sizeof(pmdval_t) * 2, (unsigned long long)__entry->pmdval)
); );
#ifdef CONFIG_X86_PAE
DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte_atomic);
TRACE_EVENT(xen_mmu_pte_clear,
TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep),
TP_ARGS(mm, addr, ptep),
TP_STRUCT__entry(
__field(struct mm_struct *, mm)
__field(unsigned long, addr)
__field(pte_t *, ptep)
),
TP_fast_assign(__entry->mm = mm;
__entry->addr = addr;
__entry->ptep = ptep),
TP_printk("mm %p addr %lx ptep %p",
__entry->mm, __entry->addr, __entry->ptep)
);
TRACE_EVENT(xen_mmu_pmd_clear, TRACE_EVENT(xen_mmu_pmd_clear,
TP_PROTO(pmd_t *pmdp), TP_PROTO(pmd_t *pmdp),
TP_ARGS(pmdp), TP_ARGS(pmdp),
@ -211,6 +213,7 @@ TRACE_EVENT(xen_mmu_pmd_clear,
TP_fast_assign(__entry->pmdp = pmdp), TP_fast_assign(__entry->pmdp = pmdp),
TP_printk("pmdp %p", __entry->pmdp) TP_printk("pmdp %p", __entry->pmdp)
); );
#endif
#if CONFIG_PGTABLE_LEVELS >= 4 #if CONFIG_PGTABLE_LEVELS >= 4

View File

@ -7874,15 +7874,16 @@ void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
} }
} }
perf_tp_event(call->event.type, count, raw_data, size, regs, head, perf_tp_event(call->event.type, count, raw_data, size, regs, head,
rctx, task, NULL); rctx, task);
} }
EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit); EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size, void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
struct pt_regs *regs, struct hlist_head *head, int rctx, struct pt_regs *regs, struct hlist_head *head, int rctx,
struct task_struct *task, struct perf_event *event) struct task_struct *task)
{ {
struct perf_sample_data data; struct perf_sample_data data;
struct perf_event *event;
struct perf_raw_record raw = { struct perf_raw_record raw = {
.frag = { .frag = {
@ -7896,15 +7897,9 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
perf_trace_buf_update(record, event_type); perf_trace_buf_update(record, event_type);
/* Use the given event instead of the hlist */ hlist_for_each_entry_rcu(event, head, hlist_entry) {
if (event) {
if (perf_tp_event_match(event, &data, regs)) if (perf_tp_event_match(event, &data, regs))
perf_swevent_event(event, count, &data, regs); perf_swevent_event(event, count, &data, regs);
} else {
hlist_for_each_entry_rcu(event, head, hlist_entry) {
if (perf_tp_event_match(event, &data, regs))
perf_swevent_event(event, count, &data, regs);
}
} }
/* /*

View File

@ -24,6 +24,7 @@
#include <linux/ctype.h> #include <linux/ctype.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/filter.h> #include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/compiler.h> #include <linux/compiler.h>
#include <asm/sections.h> #include <asm/sections.h>
@ -337,6 +338,10 @@ const char *kallsyms_lookup(unsigned long addr,
if (!ret) if (!ret)
ret = bpf_address_lookup(addr, symbolsize, ret = bpf_address_lookup(addr, symbolsize,
offset, modname, namebuf); offset, modname, namebuf);
if (!ret)
ret = ftrace_mod_address_lookup(addr, symbolsize,
offset, modname, namebuf);
return ret; return ret;
} }
@ -474,6 +479,7 @@ EXPORT_SYMBOL(__print_symbol);
struct kallsym_iter { struct kallsym_iter {
loff_t pos; loff_t pos;
loff_t pos_mod_end; loff_t pos_mod_end;
loff_t pos_ftrace_mod_end;
unsigned long value; unsigned long value;
unsigned int nameoff; /* If iterating in core kernel symbols. */ unsigned int nameoff; /* If iterating in core kernel symbols. */
char type; char type;
@ -497,11 +503,25 @@ static int get_ksymbol_mod(struct kallsym_iter *iter)
return 1; return 1;
} }
static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter)
{
int ret = ftrace_mod_get_kallsym(iter->pos - iter->pos_mod_end,
&iter->value, &iter->type,
iter->name, iter->module_name,
&iter->exported);
if (ret < 0) {
iter->pos_ftrace_mod_end = iter->pos;
return 0;
}
return 1;
}
static int get_ksymbol_bpf(struct kallsym_iter *iter) static int get_ksymbol_bpf(struct kallsym_iter *iter)
{ {
iter->module_name[0] = '\0'; iter->module_name[0] = '\0';
iter->exported = 0; iter->exported = 0;
return bpf_get_kallsym(iter->pos - iter->pos_mod_end, return bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end,
&iter->value, &iter->type, &iter->value, &iter->type,
iter->name) < 0 ? 0 : 1; iter->name) < 0 ? 0 : 1;
} }
@ -526,20 +546,31 @@ static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
iter->name[0] = '\0'; iter->name[0] = '\0';
iter->nameoff = get_symbol_offset(new_pos); iter->nameoff = get_symbol_offset(new_pos);
iter->pos = new_pos; iter->pos = new_pos;
if (new_pos == 0) if (new_pos == 0) {
iter->pos_mod_end = 0; iter->pos_mod_end = 0;
iter->pos_ftrace_mod_end = 0;
}
} }
static int update_iter_mod(struct kallsym_iter *iter, loff_t pos) static int update_iter_mod(struct kallsym_iter *iter, loff_t pos)
{ {
iter->pos = pos; iter->pos = pos;
if (iter->pos_mod_end > 0 && if (iter->pos_ftrace_mod_end > 0 &&
iter->pos_mod_end < iter->pos) iter->pos_ftrace_mod_end < iter->pos)
return get_ksymbol_bpf(iter); return get_ksymbol_bpf(iter);
if (!get_ksymbol_mod(iter)) if (iter->pos_mod_end > 0 &&
return get_ksymbol_bpf(iter); iter->pos_mod_end < iter->pos) {
if (!get_ksymbol_ftrace_mod(iter))
return get_ksymbol_bpf(iter);
return 1;
}
if (!get_ksymbol_mod(iter)) {
if (!get_ksymbol_ftrace_mod(iter))
return get_ksymbol_bpf(iter);
}
return 1; return 1;
} }
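
As a usage sketch for the extended lookup chain above: callers of kallsyms_lookup() can now resolve addresses that land in already-freed module init text, because ftrace_mod_address_lookup() is consulted after the core-kernel, module, and BPF lookups. The helper below is purely illustrative; kallsyms_lookup() itself is the long-standing interface.

#include <linux/kallsyms.h>
#include <linux/printk.h>

/* Illustrative helper: print whatever symbol an address resolves to. */
static void example_print_symbol(unsigned long addr)
{
        char namebuf[KSYM_NAME_LEN];
        unsigned long size, offset;
        char *modname;
        const char *sym;

        sym = kallsyms_lookup(addr, &size, &offset, &modname, namebuf);
        if (sym)
                pr_info("%lx is %s+%#lx/%#lx [%s]\n", addr, sym, offset,
                        size, modname ? modname : "kernel");
        else
                pr_info("%lx is not in any symbol table\n", addr);
}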

View File

@ -3481,6 +3481,8 @@ static noinline int do_init_module(struct module *mod)
if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC)) if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC))
async_synchronize_full(); async_synchronize_full();
ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base +
mod->init_layout.size);
mutex_lock(&module_mutex); mutex_lock(&module_mutex);
/* Drop initial reference. */ /* Drop initial reference. */
module_put(mod); module_put(mod);

View File

@ -39,7 +39,7 @@
* There are situations when we want to make sure that all buffers * There are situations when we want to make sure that all buffers
* were handled or when IRQs are blocked. * were handled or when IRQs are blocked.
*/ */
static int printk_safe_irq_ready; static int printk_safe_irq_ready __read_mostly;
#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) - \ #define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) - \
sizeof(atomic_t) - \ sizeof(atomic_t) - \
@ -63,11 +63,8 @@ static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
/* Get flushed in a more safe context. */ /* Get flushed in a more safe context. */
static void queue_flush_work(struct printk_safe_seq_buf *s) static void queue_flush_work(struct printk_safe_seq_buf *s)
{ {
if (printk_safe_irq_ready) { if (printk_safe_irq_ready)
/* Make sure that IRQ work is really initialized. */
smp_rmb();
irq_work_queue(&s->work); irq_work_queue(&s->work);
}
} }
/* /*
@ -398,8 +395,12 @@ void __init printk_safe_init(void)
#endif #endif
} }
/* Make sure that IRQ works are initialized before enabling. */ /*
smp_wmb(); * In the highly unlikely event that an NMI were to trigger at
* this moment, make sure IRQ work is set up before this
* variable is set.
*/
barrier();
printk_safe_irq_ready = 1; printk_safe_irq_ready = 1;
/* Flush pending messages that did not have scheduled IRQ works. */ /* Flush pending messages that did not have scheduled IRQ works. */

View File

@ -160,6 +160,17 @@ config FUNCTION_GRAPH_TRACER
address on the current task structure into a stack of calls. address on the current task structure into a stack of calls.
config PREEMPTIRQ_EVENTS
bool "Enable trace events for preempt and irq disable/enable"
select TRACE_IRQFLAGS
depends on DEBUG_PREEMPT || !PROVE_LOCKING
default n
help
Enable tracing of disable and enable events for preemption and irqs.
For tracing preempt disable/enable events, DEBUG_PREEMPT must be
enabled. For tracing irq disable/enable events, PROVE_LOCKING must
be disabled.
config IRQSOFF_TRACER config IRQSOFF_TRACER
bool "Interrupts-off Latency Tracer" bool "Interrupts-off Latency Tracer"
default n default n

View File

@ -35,6 +35,7 @@ obj-$(CONFIG_TRACING) += trace_printk.o
obj-$(CONFIG_TRACING_MAP) += tracing_map.o obj-$(CONFIG_TRACING_MAP) += tracing_map.o
obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
obj-$(CONFIG_PREEMPTIRQ_EVENTS) += trace_irqsoff.o
obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o

View File

@ -203,30 +203,6 @@ void clear_ftrace_function(void)
ftrace_trace_function = ftrace_stub; ftrace_trace_function = ftrace_stub;
} }
static void per_cpu_ops_disable_all(struct ftrace_ops *ops)
{
int cpu;
for_each_possible_cpu(cpu)
*per_cpu_ptr(ops->disabled, cpu) = 1;
}
static int per_cpu_ops_alloc(struct ftrace_ops *ops)
{
int __percpu *disabled;
if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
return -EINVAL;
disabled = alloc_percpu(int);
if (!disabled)
return -ENOMEM;
ops->disabled = disabled;
per_cpu_ops_disable_all(ops);
return 0;
}
static void ftrace_sync(struct work_struct *work) static void ftrace_sync(struct work_struct *work)
{ {
/* /*
@ -262,8 +238,8 @@ static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
* If this is a dynamic, RCU, or per CPU ops, or we force list func, * If this is a dynamic, RCU, or per CPU ops, or we force list func,
* then it needs to call the list anyway. * then it needs to call the list anyway.
*/ */
if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU | if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
FTRACE_OPS_FL_RCU) || FTRACE_FORCE_LIST_FUNC) FTRACE_FORCE_LIST_FUNC)
return ftrace_ops_list_func; return ftrace_ops_list_func;
return ftrace_ops_get_func(ops); return ftrace_ops_get_func(ops);
@ -422,11 +398,6 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
if (!core_kernel_data((unsigned long)ops)) if (!core_kernel_data((unsigned long)ops))
ops->flags |= FTRACE_OPS_FL_DYNAMIC; ops->flags |= FTRACE_OPS_FL_DYNAMIC;
if (ops->flags & FTRACE_OPS_FL_PER_CPU) {
if (per_cpu_ops_alloc(ops))
return -ENOMEM;
}
add_ftrace_ops(&ftrace_ops_list, ops); add_ftrace_ops(&ftrace_ops_list, ops);
/* Always save the function, and reset at unregistering */ /* Always save the function, and reset at unregistering */
@ -2727,11 +2698,6 @@ void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{ {
} }
static void per_cpu_ops_free(struct ftrace_ops *ops)
{
free_percpu(ops->disabled);
}
static void ftrace_startup_enable(int command) static void ftrace_startup_enable(int command)
{ {
if (saved_ftrace_func != ftrace_trace_function) { if (saved_ftrace_func != ftrace_trace_function) {
@ -2833,7 +2799,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
* not currently active, we can just free them * not currently active, we can just free them
* without synchronizing all CPUs. * without synchronizing all CPUs.
*/ */
if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU)) if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
goto free_ops; goto free_ops;
return 0; return 0;
@ -2880,7 +2846,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
* The same goes for freeing the per_cpu data of the per_cpu * The same goes for freeing the per_cpu data of the per_cpu
* ops. * ops.
*/ */
if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU)) { if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
/* /*
* We need to do a hard force of sched synchronization. * We need to do a hard force of sched synchronization.
* This is because we use preempt_disable() to do RCU, but * This is because we use preempt_disable() to do RCU, but
@ -2903,9 +2869,6 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
free_ops: free_ops:
arch_ftrace_trampoline_free(ops); arch_ftrace_trampoline_free(ops);
if (ops->flags & FTRACE_OPS_FL_PER_CPU)
per_cpu_ops_free(ops);
} }
return 0; return 0;
@ -5672,10 +5635,29 @@ static int ftrace_process_locs(struct module *mod,
return ret; return ret;
} }
struct ftrace_mod_func {
struct list_head list;
char *name;
unsigned long ip;
unsigned int size;
};
struct ftrace_mod_map {
struct rcu_head rcu;
struct list_head list;
struct module *mod;
unsigned long start_addr;
unsigned long end_addr;
struct list_head funcs;
unsigned int num_funcs;
};
#ifdef CONFIG_MODULES #ifdef CONFIG_MODULES
#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next) #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
static LIST_HEAD(ftrace_mod_maps);
static int referenced_filters(struct dyn_ftrace *rec) static int referenced_filters(struct dyn_ftrace *rec)
{ {
struct ftrace_ops *ops; struct ftrace_ops *ops;
@ -5729,8 +5711,26 @@ static void clear_mod_from_hashes(struct ftrace_page *pg)
mutex_unlock(&trace_types_lock); mutex_unlock(&trace_types_lock);
} }
static void ftrace_free_mod_map(struct rcu_head *rcu)
{
struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
struct ftrace_mod_func *mod_func;
struct ftrace_mod_func *n;
/* All the contents of mod_map are now not visible to readers */
list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
kfree(mod_func->name);
list_del(&mod_func->list);
kfree(mod_func);
}
kfree(mod_map);
}
void ftrace_release_mod(struct module *mod) void ftrace_release_mod(struct module *mod)
{ {
struct ftrace_mod_map *mod_map;
struct ftrace_mod_map *n;
struct dyn_ftrace *rec; struct dyn_ftrace *rec;
struct ftrace_page **last_pg; struct ftrace_page **last_pg;
struct ftrace_page *tmp_page = NULL; struct ftrace_page *tmp_page = NULL;
@ -5742,6 +5742,14 @@ void ftrace_release_mod(struct module *mod)
if (ftrace_disabled) if (ftrace_disabled)
goto out_unlock; goto out_unlock;
list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
if (mod_map->mod == mod) {
list_del_rcu(&mod_map->list);
call_rcu_sched(&mod_map->rcu, ftrace_free_mod_map);
break;
}
}
/* /*
* Each module has its own ftrace_pages, remove * Each module has its own ftrace_pages, remove
* them from the list. * them from the list.
@ -5749,7 +5757,8 @@ void ftrace_release_mod(struct module *mod)
last_pg = &ftrace_pages_start; last_pg = &ftrace_pages_start;
for (pg = ftrace_pages_start; pg; pg = *last_pg) { for (pg = ftrace_pages_start; pg; pg = *last_pg) {
rec = &pg->records[0]; rec = &pg->records[0];
if (within_module_core(rec->ip, mod)) { if (within_module_core(rec->ip, mod) ||
within_module_init(rec->ip, mod)) {
/* /*
* As core pages are first, the first * As core pages are first, the first
* page should never be a module page. * page should never be a module page.
@ -5818,7 +5827,8 @@ void ftrace_module_enable(struct module *mod)
* not part of this module, then skip this pg, * not part of this module, then skip this pg,
* which the "break" will do. * which the "break" will do.
*/ */
if (!within_module_core(rec->ip, mod)) if (!within_module_core(rec->ip, mod) &&
!within_module_init(rec->ip, mod))
break; break;
cnt = 0; cnt = 0;
@ -5863,23 +5873,245 @@ void ftrace_module_init(struct module *mod)
ftrace_process_locs(mod, mod->ftrace_callsites, ftrace_process_locs(mod, mod->ftrace_callsites,
mod->ftrace_callsites + mod->num_ftrace_callsites); mod->ftrace_callsites + mod->num_ftrace_callsites);
} }
static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
struct dyn_ftrace *rec)
{
struct ftrace_mod_func *mod_func;
unsigned long symsize;
unsigned long offset;
char str[KSYM_SYMBOL_LEN];
char *modname;
const char *ret;
ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
if (!ret)
return;
mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
if (!mod_func)
return;
mod_func->name = kstrdup(str, GFP_KERNEL);
if (!mod_func->name) {
kfree(mod_func);
return;
}
mod_func->ip = rec->ip - offset;
mod_func->size = symsize;
mod_map->num_funcs++;
list_add_rcu(&mod_func->list, &mod_map->funcs);
}
static struct ftrace_mod_map *
allocate_ftrace_mod_map(struct module *mod,
unsigned long start, unsigned long end)
{
struct ftrace_mod_map *mod_map;
mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
if (!mod_map)
return NULL;
mod_map->mod = mod;
mod_map->start_addr = start;
mod_map->end_addr = end;
mod_map->num_funcs = 0;
INIT_LIST_HEAD_RCU(&mod_map->funcs);
list_add_rcu(&mod_map->list, &ftrace_mod_maps);
return mod_map;
}
static const char *
ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
unsigned long addr, unsigned long *size,
unsigned long *off, char *sym)
{
struct ftrace_mod_func *found_func = NULL;
struct ftrace_mod_func *mod_func;
list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
if (addr >= mod_func->ip &&
addr < mod_func->ip + mod_func->size) {
found_func = mod_func;
break;
}
}
if (found_func) {
if (size)
*size = found_func->size;
if (off)
*off = addr - found_func->ip;
if (sym)
strlcpy(sym, found_func->name, KSYM_NAME_LEN);
return found_func->name;
}
return NULL;
}
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym)
{
struct ftrace_mod_map *mod_map;
const char *ret = NULL;
/* mod_map is freed via call_rcu_sched() */
preempt_disable();
list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
if (ret) {
if (modname)
*modname = mod_map->mod->name;
break;
}
}
preempt_enable();
return ret;
}
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
char *type, char *name,
char *module_name, int *exported)
{
struct ftrace_mod_map *mod_map;
struct ftrace_mod_func *mod_func;
preempt_disable();
list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
if (symnum >= mod_map->num_funcs) {
symnum -= mod_map->num_funcs;
continue;
}
list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
if (symnum > 1) {
symnum--;
continue;
}
*value = mod_func->ip;
*type = 'T';
strlcpy(name, mod_func->name, KSYM_NAME_LEN);
strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
*exported = 1;
preempt_enable();
return 0;
}
WARN_ON(1);
break;
}
preempt_enable();
return -ERANGE;
}
#else
static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
struct dyn_ftrace *rec) { }
static inline struct ftrace_mod_map *
allocate_ftrace_mod_map(struct module *mod,
unsigned long start, unsigned long end)
{
return NULL;
}
#endif /* CONFIG_MODULES */ #endif /* CONFIG_MODULES */
void __init ftrace_free_init_mem(void) struct ftrace_init_func {
struct list_head list;
unsigned long ip;
};
/* Clear any init ips from hashes */
static void
clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
{ {
unsigned long start = (unsigned long)(&__init_begin); struct ftrace_func_entry *entry;
unsigned long end = (unsigned long)(&__init_end);
if (ftrace_hash_empty(hash))
return;
entry = __ftrace_lookup_ip(hash, func->ip);
/*
* Do not allow this rec to match again.
* Yeah, it may waste some memory, but will be removed
* if/when the hash is modified again.
*/
if (entry)
entry->ip = 0;
}
static void
clear_func_from_hashes(struct ftrace_init_func *func)
{
struct trace_array *tr;
mutex_lock(&trace_types_lock);
list_for_each_entry(tr, &ftrace_trace_arrays, list) {
if (!tr->ops || !tr->ops->func_hash)
continue;
mutex_lock(&tr->ops->func_hash->regex_lock);
clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
mutex_unlock(&tr->ops->func_hash->regex_lock);
}
mutex_unlock(&trace_types_lock);
}
static void add_to_clear_hash_list(struct list_head *clear_list,
struct dyn_ftrace *rec)
{
struct ftrace_init_func *func;
func = kmalloc(sizeof(*func), GFP_KERNEL);
if (!func) {
WARN_ONCE(1, "alloc failure, ftrace filter could be stale\n");
return;
}
func->ip = rec->ip;
list_add(&func->list, clear_list);
}
void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
{
unsigned long start = (unsigned long)(start_ptr);
unsigned long end = (unsigned long)(end_ptr);
struct ftrace_page **last_pg = &ftrace_pages_start; struct ftrace_page **last_pg = &ftrace_pages_start;
struct ftrace_page *pg; struct ftrace_page *pg;
struct dyn_ftrace *rec; struct dyn_ftrace *rec;
struct dyn_ftrace key; struct dyn_ftrace key;
struct ftrace_mod_map *mod_map = NULL;
struct ftrace_init_func *func, *func_next;
struct list_head clear_hash;
int order; int order;
INIT_LIST_HEAD(&clear_hash);
key.ip = start; key.ip = start;
key.flags = end; /* overload flags, as it is unsigned long */ key.flags = end; /* overload flags, as it is unsigned long */
mutex_lock(&ftrace_lock); mutex_lock(&ftrace_lock);
/*
* If we are freeing module init memory, then check if
* any tracer is active. If so, we need to save a mapping of
* the module functions being freed with the address.
*/
if (mod && ftrace_ops_list != &ftrace_list_end)
mod_map = allocate_ftrace_mod_map(mod, start, end);
for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) { for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
if (end < pg->records[0].ip || if (end < pg->records[0].ip ||
start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
@ -5890,6 +6122,13 @@ void __init ftrace_free_init_mem(void)
ftrace_cmp_recs); ftrace_cmp_recs);
if (!rec) if (!rec)
continue; continue;
/* rec will be cleared from hashes after ftrace_lock unlock */
add_to_clear_hash_list(&clear_hash, rec);
if (mod_map)
save_ftrace_mod_rec(mod_map, rec);
pg->index--; pg->index--;
ftrace_update_tot_cnt--; ftrace_update_tot_cnt--;
if (!pg->index) { if (!pg->index) {
@ -5908,6 +6147,19 @@ void __init ftrace_free_init_mem(void)
goto again; goto again;
} }
mutex_unlock(&ftrace_lock); mutex_unlock(&ftrace_lock);
list_for_each_entry_safe(func, func_next, &clear_hash, list) {
clear_func_from_hashes(func);
kfree(func);
}
}
void __init ftrace_free_init_mem(void)
{
void *start = (void *)(&__init_begin);
void *end = (void *)(&__init_end);
ftrace_free_mem(NULL, start, end);
} }
void __init ftrace_init(void) void __init ftrace_init(void)
@ -6063,10 +6315,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
* If any of the above fails then the op->func() is not executed. * If any of the above fails then the op->func() is not executed.
*/ */
if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) && if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
(!(op->flags & FTRACE_OPS_FL_PER_CPU) ||
!ftrace_function_local_disabled(op)) &&
ftrace_ops_test(op, ip, regs)) { ftrace_ops_test(op, ip, regs)) {
if (FTRACE_WARN_ON(!op->func)) { if (FTRACE_WARN_ON(!op->func)) {
pr_warn("op=%p %pS\n", op, op); pr_warn("op=%p %pS\n", op, op);
goto out; goto out;
@ -6124,10 +6373,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
preempt_disable_notrace(); preempt_disable_notrace();
if (!(op->flags & FTRACE_OPS_FL_PER_CPU) || op->func(ip, parent_ip, op, regs);
!ftrace_function_local_disabled(op)) {
op->func(ip, parent_ip, op, regs);
}
preempt_enable_notrace(); preempt_enable_notrace();
trace_clear_recursion(bit); trace_clear_recursion(bit);
@ -6151,7 +6397,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
* or does per cpu logic, then we need to call the assist handler. * or does per cpu logic, then we need to call the assist handler.
*/ */
if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) || if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) ||
ops->flags & (FTRACE_OPS_FL_RCU | FTRACE_OPS_FL_PER_CPU)) ops->flags & FTRACE_OPS_FL_RCU)
return ftrace_ops_assist_func; return ftrace_ops_assist_func;
return ops->func; return ops->func;

View File

@ -2536,61 +2536,29 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
* The lock and unlock are done within a preempt disable section. * The lock and unlock are done within a preempt disable section.
* The current_context per_cpu variable can only be modified * The current_context per_cpu variable can only be modified
* by the current task between lock and unlock. But it can * by the current task between lock and unlock. But it can
* be modified more than once via an interrupt. To pass this * be modified more than once via an interrupt. There are four
* information from the lock to the unlock without having to * different contexts that we need to consider.
* access the 'in_interrupt()' functions again (which do show
* a bit of overhead in something as critical as function tracing,
* we use a bitmask trick.
* *
* bit 0 = NMI context * Normal context.
* bit 1 = IRQ context * SoftIRQ context
* bit 2 = SoftIRQ context * IRQ context
* bit 3 = normal context. * NMI context
* *
* This works because this is the order of contexts that can * If for some reason the ring buffer starts to recurse, we
* preempt other contexts. A SoftIRQ never preempts an IRQ * only allow that to happen at most 4 times (one for each
* context. * context). If it happens 5 times, then we consider this a
* * recursive loop and do not let it go further.
* When the context is determined, the corresponding bit is
* checked and set (if it was set, then a recursion of that context
* happened).
*
* On unlock, we need to clear this bit. To do so, just subtract
* 1 from the current_context and AND it to itself.
*
* (binary)
* 101 - 1 = 100
* 101 & 100 = 100 (clearing bit zero)
*
* 1010 - 1 = 1001
* 1010 & 1001 = 1000 (clearing bit 1)
*
* The least significant bit can be cleared this way, and it
* just so happens that it is the same bit corresponding to
* the current context.
*/ */
static __always_inline int static __always_inline int
trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
{ {
unsigned int val = cpu_buffer->current_context; if (cpu_buffer->current_context >= 4)
int bit;
if (in_interrupt()) {
if (in_nmi())
bit = RB_CTX_NMI;
else if (in_irq())
bit = RB_CTX_IRQ;
else
bit = RB_CTX_SOFTIRQ;
} else
bit = RB_CTX_NORMAL;
if (unlikely(val & (1 << bit)))
return 1; return 1;
val |= (1 << bit); cpu_buffer->current_context++;
cpu_buffer->current_context = val; /* Interrupts must see this update */
barrier();
return 0; return 0;
} }
@ -2598,7 +2566,9 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
static __always_inline void static __always_inline void
trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
{ {
cpu_buffer->current_context &= cpu_buffer->current_context - 1; /* Don't let the dec leak out */
barrier();
cpu_buffer->current_context--;
} }
/** /**

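The rewritten comment above replaces the bitmask bookkeeping with a plain nesting counter: normal context, softirq, irq, and NMI can each preempt the previous one at most once, so at most four nested entries into the same per-cpu buffer are legitimate and a fifth attempt is treated as recursion. Below is a small self-contained sketch of that guard with invented names; the real code lives on struct ring_buffer_per_cpu and adds the barrier() calls shown in the hunk.

/* Illustrative stand-in for the per-cpu buffer's recursion guard. */
struct example_rb_cpu {
        unsigned int current_context;   /* nesting depth across the 4 contexts */
};

/* Returns nonzero if this entry would be a recursive (fifth) one. */
static inline int example_recursive_lock(struct example_rb_cpu *b)
{
        if (b->current_context >= 4)
                return 1;               /* normal + softirq + irq + NMI already in */
        b->current_context++;
        return 0;
}

static inline void example_recursive_unlock(struct example_rb_cpu *b)
{
        b->current_context--;
}
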
View File

@ -7687,6 +7687,7 @@ static int instance_mkdir(const char *name)
struct trace_array *tr; struct trace_array *tr;
int ret; int ret;
mutex_lock(&event_mutex);
mutex_lock(&trace_types_lock); mutex_lock(&trace_types_lock);
ret = -EEXIST; ret = -EEXIST;
@ -7742,6 +7743,7 @@ static int instance_mkdir(const char *name)
list_add(&tr->list, &ftrace_trace_arrays); list_add(&tr->list, &ftrace_trace_arrays);
mutex_unlock(&trace_types_lock); mutex_unlock(&trace_types_lock);
mutex_unlock(&event_mutex);
return 0; return 0;
@ -7753,6 +7755,7 @@ static int instance_mkdir(const char *name)
out_unlock: out_unlock:
mutex_unlock(&trace_types_lock); mutex_unlock(&trace_types_lock);
mutex_unlock(&event_mutex);
return ret; return ret;
@ -7765,6 +7768,7 @@ static int instance_rmdir(const char *name)
int ret; int ret;
int i; int i;
mutex_lock(&event_mutex);
mutex_lock(&trace_types_lock); mutex_lock(&trace_types_lock);
ret = -ENODEV; ret = -ENODEV;
@ -7810,6 +7814,7 @@ static int instance_rmdir(const char *name)
out_unlock: out_unlock:
mutex_unlock(&trace_types_lock); mutex_unlock(&trace_types_lock);
mutex_unlock(&event_mutex);
return ret; return ret;
} }
@ -8276,6 +8281,92 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
} }
EXPORT_SYMBOL_GPL(ftrace_dump); EXPORT_SYMBOL_GPL(ftrace_dump);
int trace_run_command(const char *buf, int (*createfn)(int, char **))
{
char **argv;
int argc, ret;
argc = 0;
ret = 0;
argv = argv_split(GFP_KERNEL, buf, &argc);
if (!argv)
return -ENOMEM;
if (argc)
ret = createfn(argc, argv);
argv_free(argv);
return ret;
}
#define WRITE_BUFSIZE 4096
ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos,
int (*createfn)(int, char **))
{
char *kbuf, *buf, *tmp;
int ret = 0;
size_t done = 0;
size_t size;
kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
while (done < count) {
size = count - done;
if (size >= WRITE_BUFSIZE)
size = WRITE_BUFSIZE - 1;
if (copy_from_user(kbuf, buffer + done, size)) {
ret = -EFAULT;
goto out;
}
kbuf[size] = '\0';
buf = kbuf;
do {
tmp = strchr(buf, '\n');
if (tmp) {
*tmp = '\0';
size = tmp - buf + 1;
} else {
size = strlen(buf);
if (done + size < count) {
if (buf != kbuf)
break;
/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
pr_warn("Line length is too long: Should be less than %d\n",
WRITE_BUFSIZE - 2);
ret = -EINVAL;
goto out;
}
}
done += size;
/* Remove comments */
tmp = strchr(buf, '#');
if (tmp)
*tmp = '\0';
ret = trace_run_command(buf, createfn);
if (ret)
goto out;
buf += size;
} while (done < count);
}
ret = done;
out:
kfree(kbuf);
return ret;
}
__init static int tracer_alloc_buffers(void) __init static int tracer_alloc_buffers(void)
{ {
int ring_buf_size; int ring_buf_size;

View File

@ -739,8 +739,6 @@ extern int trace_selftest_startup_wakeup(struct tracer *trace,
struct trace_array *tr); struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace, extern int trace_selftest_startup_nop(struct tracer *trace,
struct trace_array *tr); struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace, extern int trace_selftest_startup_branch(struct tracer *trace,
struct trace_array *tr); struct trace_array *tr);
/* /*
@ -1755,6 +1753,13 @@ void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set); int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled); int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
#define MAX_EVENT_NAME_LEN 64
extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
extern ssize_t trace_parse_run_command(struct file *file,
const char __user *buffer, size_t count, loff_t *ppos,
int (*createfn)(int, char**));
/* /*
* Normal trace_printk() and friends allocates special buffers * Normal trace_printk() and friends allocates special buffers
* to do the manipulation, as well as saves the print formats * to do the manipulation, as well as saves the print formats
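
A hedged usage sketch for the trace_run_command()/trace_parse_run_command() helpers declared above: they factor out the line-splitting, comment-stripping parse loop so that command-style tracefs files can share it. The command callback and write handler below are invented for illustration.

#include <linux/fs.h>
#include <linux/printk.h>
#include "trace.h"      /* trace_parse_run_command() declared above */

/* Illustrative createfn: invoked once per parsed, comment-stripped line. */
static int example_create_cmd(int argc, char **argv)
{
        if (argc < 1)
                return -EINVAL;
        pr_info("example command '%s' with %d argument(s)\n",
                argv[0], argc - 1);
        return 0;
}

/* Illustrative .write handler for a command-style tracefs file. */
static ssize_t example_cmd_write(struct file *file, const char __user *buffer,
                                 size_t count, loff_t *ppos)
{
        return trace_parse_run_command(file, buffer, count, ppos,
                                       example_create_cmd);
}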

View File

@ -240,27 +240,41 @@ void perf_trace_destroy(struct perf_event *p_event)
int perf_trace_add(struct perf_event *p_event, int flags) int perf_trace_add(struct perf_event *p_event, int flags)
{ {
struct trace_event_call *tp_event = p_event->tp_event; struct trace_event_call *tp_event = p_event->tp_event;
struct hlist_head __percpu *pcpu_list;
struct hlist_head *list;
pcpu_list = tp_event->perf_events;
if (WARN_ON_ONCE(!pcpu_list))
return -EINVAL;
if (!(flags & PERF_EF_START)) if (!(flags & PERF_EF_START))
p_event->hw.state = PERF_HES_STOPPED; p_event->hw.state = PERF_HES_STOPPED;
list = this_cpu_ptr(pcpu_list); /*
hlist_add_head_rcu(&p_event->hlist_entry, list); * If TRACE_REG_PERF_ADD returns false; no custom action was performed
* and we need to take the default action of enqueueing our event on
* the right per-cpu hlist.
*/
if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event)) {
struct hlist_head __percpu *pcpu_list;
struct hlist_head *list;
return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event); pcpu_list = tp_event->perf_events;
if (WARN_ON_ONCE(!pcpu_list))
return -EINVAL;
list = this_cpu_ptr(pcpu_list);
hlist_add_head_rcu(&p_event->hlist_entry, list);
}
return 0;
} }
void perf_trace_del(struct perf_event *p_event, int flags) void perf_trace_del(struct perf_event *p_event, int flags)
{ {
struct trace_event_call *tp_event = p_event->tp_event; struct trace_event_call *tp_event = p_event->tp_event;
hlist_del_rcu(&p_event->hlist_entry);
tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event); /*
* If TRACE_REG_PERF_DEL returns false; no custom action was performed
+ * and we need to take the default action of dequeueing our event from
+ * the right per-cpu hlist.
+ */
+if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event))
+hlist_del_rcu(&p_event->hlist_entry);
}
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
@@ -306,16 +320,25 @@ static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
-struct perf_event *event;
struct ftrace_entry *entry;
-struct hlist_head *head;
+struct perf_event *event;
+struct hlist_head head;
struct pt_regs regs;
int rctx;
-head = this_cpu_ptr(event_function.perf_events);
-if (hlist_empty(head))
+if ((unsigned long)ops->private != smp_processor_id())
return;
+event = container_of(ops, struct perf_event, ftrace_ops);
+/*
+ * @event->hlist entry is NULL (per INIT_HLIST_NODE), and all
+ * the perf code does is hlist_for_each_entry_rcu(), so we can
+ * get away with simply setting the @head.first pointer in order
+ * to create a singular list.
+ */
+head.first = &event->hlist_entry;
#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
sizeof(u64)) - sizeof(u32))
@@ -330,9 +353,8 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
entry->ip = ip;
entry->parent_ip = parent_ip;
-event = container_of(ops, struct perf_event, ftrace_ops);
perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
-1, &regs, head, NULL, event);
+1, &regs, &head, NULL);
#undef ENTRY_SIZE
}
@@ -341,8 +363,10 @@ static int perf_ftrace_function_register(struct perf_event *event)
{
struct ftrace_ops *ops = &event->ftrace_ops;
-ops->flags |= FTRACE_OPS_FL_PER_CPU | FTRACE_OPS_FL_RCU;
+ops->flags = FTRACE_OPS_FL_RCU;
ops->func = perf_ftrace_function_call;
+ops->private = (void *)(unsigned long)nr_cpu_ids;
return register_ftrace_function(ops);
}
@@ -354,19 +378,11 @@ static int perf_ftrace_function_unregister(struct perf_event *event)
return ret;
}
-static void perf_ftrace_function_enable(struct perf_event *event)
-{
-ftrace_function_local_enable(&event->ftrace_ops);
-}
-static void perf_ftrace_function_disable(struct perf_event *event)
-{
-ftrace_function_local_disable(&event->ftrace_ops);
-}
int perf_ftrace_event_register(struct trace_event_call *call,
enum trace_reg type, void *data)
{
+struct perf_event *event = data;
switch (type) {
case TRACE_REG_REGISTER:
case TRACE_REG_UNREGISTER:
@@ -379,11 +395,11 @@ int perf_ftrace_event_register(struct trace_event_call *call,
case TRACE_REG_PERF_CLOSE:
return perf_ftrace_function_unregister(data);
case TRACE_REG_PERF_ADD:
-perf_ftrace_function_enable(data);
-return 0;
+event->ftrace_ops.private = (void *)(unsigned long)smp_processor_id();
+return 1;
case TRACE_REG_PERF_DEL:
-perf_ftrace_function_disable(data);
-return 0;
+event->ftrace_ops.private = (void *)(unsigned long)nr_cpu_ids;
+return 1;
}
return -EINVAL;


@@ -1406,8 +1406,8 @@ static int subsystem_open(struct inode *inode, struct file *filp)
return -ENODEV;
/* Make sure the system still exists */
-mutex_lock(&trace_types_lock);
mutex_lock(&event_mutex);
+mutex_lock(&trace_types_lock);
list_for_each_entry(tr, &ftrace_trace_arrays, list) {
list_for_each_entry(dir, &tr->systems, list) {
if (dir == inode->i_private) {
@@ -1421,8 +1421,8 @@ static int subsystem_open(struct inode *inode, struct file *filp)
}
}
exit_loop:
-mutex_unlock(&event_mutex);
mutex_unlock(&trace_types_lock);
+mutex_unlock(&event_mutex);
if (!system)
return -ENODEV;
@@ -2294,15 +2294,15 @@ static void __add_event_to_tracers(struct trace_event_call *call);
int trace_add_event_call(struct trace_event_call *call)
{
int ret;
-mutex_lock(&trace_types_lock);
mutex_lock(&event_mutex);
+mutex_lock(&trace_types_lock);
ret = __register_event(call, NULL);
if (ret >= 0)
__add_event_to_tracers(call);
-mutex_unlock(&event_mutex);
mutex_unlock(&trace_types_lock);
+mutex_unlock(&event_mutex);
return ret;
}
@@ -2356,13 +2356,13 @@ int trace_remove_event_call(struct trace_event_call *call)
{
int ret;
-mutex_lock(&trace_types_lock);
mutex_lock(&event_mutex);
+mutex_lock(&trace_types_lock);
down_write(&trace_event_sem);
ret = probe_remove_event_call(call);
up_write(&trace_event_sem);
-mutex_unlock(&event_mutex);
mutex_unlock(&trace_types_lock);
+mutex_unlock(&event_mutex);
return ret;
}
@@ -2424,8 +2424,8 @@ static int trace_module_notify(struct notifier_block *self,
{
struct module *mod = data;
-mutex_lock(&trace_types_lock);
mutex_lock(&event_mutex);
+mutex_lock(&trace_types_lock);
switch (val) {
case MODULE_STATE_COMING:
trace_module_add_events(mod);
@@ -2434,8 +2434,8 @@ static int trace_module_notify(struct notifier_block *self,
trace_module_remove_events(mod);
break;
}
-mutex_unlock(&event_mutex);
mutex_unlock(&trace_types_lock);
+mutex_unlock(&event_mutex);
return 0;
}
@@ -2950,24 +2950,24 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
* creates the event hierachry in the @parent/events directory.
*
* Returns 0 on success.
+ *
+ * Must be called with event_mutex held.
*/
int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
{
int ret;
-mutex_lock(&event_mutex);
+lockdep_assert_held(&event_mutex);
ret = create_event_toplevel_files(parent, tr);
if (ret)
-goto out_unlock;
+goto out;
down_write(&trace_event_sem);
__trace_add_event_dirs(tr);
up_write(&trace_event_sem);
-out_unlock:
-mutex_unlock(&event_mutex);
+out:
return ret;
}
@@ -2996,9 +2996,10 @@ early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
return ret;
}
+/* Must be called with event_mutex held */
int event_trace_del_tracer(struct trace_array *tr)
{
-mutex_lock(&event_mutex);
+lockdep_assert_held(&event_mutex);
/* Disable any event triggers and associated soft-disabled events */
clear_event_triggers(tr);
@@ -3019,8 +3020,6 @@ int event_trace_del_tracer(struct trace_array *tr)
tr->event_dir = NULL;
-mutex_unlock(&event_mutex);
return 0;
}


@@ -28,12 +28,16 @@ struct hist_field;
typedef u64 (*hist_field_fn_t) (struct hist_field *field, void *event);
+#define HIST_FIELD_OPERANDS_MAX 2
struct hist_field {
struct ftrace_event_field *field;
unsigned long flags;
hist_field_fn_t fn;
unsigned int size;
unsigned int offset;
+unsigned int is_signed;
+struct hist_field *operands[HIST_FIELD_OPERANDS_MAX];
};
static u64 hist_field_none(struct hist_field *field, void *event)
@@ -71,7 +75,9 @@ static u64 hist_field_pstring(struct hist_field *hist_field, void *event)
static u64 hist_field_log2(struct hist_field *hist_field, void *event)
{
-u64 val = *(u64 *)(event + hist_field->field->offset);
+struct hist_field *operand = hist_field->operands[0];
+u64 val = operand->fn(operand, event);
return (u64) ilog2(roundup_pow_of_two(val));
}
@@ -110,16 +116,16 @@ DEFINE_HIST_FIELD_FN(u8);
#define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
enum hist_field_flags {
-HIST_FIELD_FL_HITCOUNT = 1,
-HIST_FIELD_FL_KEY = 2,
-HIST_FIELD_FL_STRING = 4,
-HIST_FIELD_FL_HEX = 8,
-HIST_FIELD_FL_SYM = 16,
-HIST_FIELD_FL_SYM_OFFSET = 32,
-HIST_FIELD_FL_EXECNAME = 64,
-HIST_FIELD_FL_SYSCALL = 128,
-HIST_FIELD_FL_STACKTRACE = 256,
-HIST_FIELD_FL_LOG2 = 512,
+HIST_FIELD_FL_HITCOUNT = 1 << 0,
+HIST_FIELD_FL_KEY = 1 << 1,
+HIST_FIELD_FL_STRING = 1 << 2,
+HIST_FIELD_FL_HEX = 1 << 3,
+HIST_FIELD_FL_SYM = 1 << 4,
+HIST_FIELD_FL_SYM_OFFSET = 1 << 5,
+HIST_FIELD_FL_EXECNAME = 1 << 6,
+HIST_FIELD_FL_SYSCALL = 1 << 7,
+HIST_FIELD_FL_STACKTRACE = 1 << 8,
+HIST_FIELD_FL_LOG2 = 1 << 9,
};
struct hist_trigger_attrs {
@@ -146,6 +152,25 @@ struct hist_trigger_data {
struct tracing_map *map;
};
+static const char *hist_field_name(struct hist_field *field,
+unsigned int level)
+{
+const char *field_name = "";
+if (level > 1)
+return field_name;
+if (field->field)
+field_name = field->field->name;
+else if (field->flags & HIST_FIELD_FL_LOG2)
+field_name = hist_field_name(field->operands[0], ++level);
+if (field_name == NULL)
+field_name = "";
+return field_name;
+}
static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
{
hist_field_fn_t fn = NULL;
@@ -340,8 +365,20 @@ static const struct tracing_map_ops hist_trigger_elt_comm_ops = {
.elt_init = hist_trigger_elt_comm_init,
};
-static void destroy_hist_field(struct hist_field *hist_field)
+static void destroy_hist_field(struct hist_field *hist_field,
+unsigned int level)
{
+unsigned int i;
+if (level > 2)
+return;
+if (!hist_field)
+return;
+for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
+destroy_hist_field(hist_field->operands[i], level + 1);
kfree(hist_field);
}
@@ -368,7 +405,10 @@ static struct hist_field *create_hist_field(struct ftrace_event_field *field,
}
if (flags & HIST_FIELD_FL_LOG2) {
+unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
hist_field->fn = hist_field_log2;
+hist_field->operands[0] = create_hist_field(field, fl);
+hist_field->size = hist_field->operands[0]->size;
goto out;
}
@@ -388,7 +428,7 @@ static struct hist_field *create_hist_field(struct ftrace_event_field *field,
hist_field->fn = select_value_fn(field->size,
field->is_signed);
if (!hist_field->fn) {
-destroy_hist_field(hist_field);
+destroy_hist_field(hist_field, 0);
return NULL;
}
}
@@ -405,7 +445,7 @@ static void destroy_hist_fields(struct hist_trigger_data *hist_data)
for (i = 0; i < TRACING_MAP_FIELDS_MAX; i++) {
if (hist_data->fields[i]) {
-destroy_hist_field(hist_data->fields[i]);
+destroy_hist_field(hist_data->fields[i], 0);
hist_data->fields[i] = NULL;
}
}
@@ -450,7 +490,7 @@ static int create_val_field(struct hist_trigger_data *hist_data,
}
field = trace_find_event_field(file->event_call, field_name);
-if (!field) {
+if (!field || !field->size) {
ret = -EINVAL;
goto out;
}
@@ -548,7 +588,7 @@ static int create_key_field(struct hist_trigger_data *hist_data,
}
field = trace_find_event_field(file->event_call, field_name);
-if (!field) {
+if (!field || !field->size) {
ret = -EINVAL;
goto out;
}
@@ -653,7 +693,6 @@ static int is_descending(const char *str)
static int create_sort_keys(struct hist_trigger_data *hist_data)
{
char *fields_str = hist_data->attrs->sort_key_str;
-struct ftrace_event_field *field = NULL;
struct tracing_map_sort_key *sort_key;
int descending, ret = 0;
unsigned int i, j;
@@ -670,7 +709,9 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
}
for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
+struct hist_field *hist_field;
char *field_str, *field_name;
+const char *test_name;
sort_key = &hist_data->sort_keys[i];
@@ -703,8 +744,10 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
}
for (j = 1; j < hist_data->n_fields; j++) {
-field = hist_data->fields[j]->field;
-if (field && (strcmp(field_name, field->name) == 0)) {
+hist_field = hist_data->fields[j];
+test_name = hist_field_name(hist_field, 0);
+if (strcmp(field_name, test_name) == 0) {
sort_key->field_idx = j;
descending = is_descending(field_str);
if (descending < 0) {
@@ -952,6 +995,7 @@ hist_trigger_entry_print(struct seq_file *m,
struct hist_field *key_field;
char str[KSYM_SYMBOL_LEN];
bool multiline = false;
+const char *field_name;
unsigned int i;
u64 uval;
@@ -963,26 +1007,27 @@ hist_trigger_entry_print(struct seq_file *m,
if (i > hist_data->n_vals)
seq_puts(m, ", ");
+field_name = hist_field_name(key_field, 0);
if (key_field->flags & HIST_FIELD_FL_HEX) {
uval = *(u64 *)(key + key_field->offset);
-seq_printf(m, "%s: %llx",
-key_field->field->name, uval);
+seq_printf(m, "%s: %llx", field_name, uval);
} else if (key_field->flags & HIST_FIELD_FL_SYM) {
uval = *(u64 *)(key + key_field->offset);
sprint_symbol_no_offset(str, uval);
-seq_printf(m, "%s: [%llx] %-45s",
-key_field->field->name, uval, str);
+seq_printf(m, "%s: [%llx] %-45s", field_name,
+uval, str);
} else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
uval = *(u64 *)(key + key_field->offset);
sprint_symbol(str, uval);
-seq_printf(m, "%s: [%llx] %-55s",
-key_field->field->name, uval, str);
+seq_printf(m, "%s: [%llx] %-55s", field_name,
+uval, str);
} else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
char *comm = elt->private_data;
uval = *(u64 *)(key + key_field->offset);
-seq_printf(m, "%s: %-16s[%10llu]",
-key_field->field->name, comm, uval);
+seq_printf(m, "%s: %-16s[%10llu]", field_name,
+comm, uval);
} else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
const char *syscall_name;
@@ -991,8 +1036,8 @@ hist_trigger_entry_print(struct seq_file *m,
if (!syscall_name)
syscall_name = "unknown_syscall";
-seq_printf(m, "%s: %-30s[%3llu]",
-key_field->field->name, syscall_name, uval);
+seq_printf(m, "%s: %-30s[%3llu]", field_name,
+syscall_name, uval);
} else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
seq_puts(m, "stacktrace:\n");
hist_trigger_stacktrace_print(m,
@@ -1000,15 +1045,14 @@ hist_trigger_entry_print(struct seq_file *m,
HIST_STACKTRACE_DEPTH);
multiline = true;
} else if (key_field->flags & HIST_FIELD_FL_LOG2) {
-seq_printf(m, "%s: ~ 2^%-2llu", key_field->field->name,
+seq_printf(m, "%s: ~ 2^%-2llu", field_name,
*(u64 *)(key + key_field->offset));
} else if (key_field->flags & HIST_FIELD_FL_STRING) {
-seq_printf(m, "%s: %-50s", key_field->field->name,
+seq_printf(m, "%s: %-50s", field_name,
(char *)(key + key_field->offset));
} else {
uval = *(u64 *)(key + key_field->offset);
-seq_printf(m, "%s: %10llu", key_field->field->name,
-uval);
+seq_printf(m, "%s: %10llu", field_name, uval);
}
}
@@ -1021,13 +1065,13 @@ hist_trigger_entry_print(struct seq_file *m,
tracing_map_read_sum(elt, HITCOUNT_IDX));
for (i = 1; i < hist_data->n_vals; i++) {
+field_name = hist_field_name(hist_data->fields[i], 0);
if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
-seq_printf(m, " %s: %10llx",
-hist_data->fields[i]->field->name,
+seq_printf(m, " %s: %10llx", field_name,
tracing_map_read_sum(elt, i));
} else {
-seq_printf(m, " %s: %10llu",
-hist_data->fields[i]->field->name,
+seq_printf(m, " %s: %10llu", field_name,
tracing_map_read_sum(elt, i));
}
}
@@ -1062,7 +1106,7 @@ static void hist_trigger_show(struct seq_file *m,
struct event_trigger_data *data, int n)
{
struct hist_trigger_data *hist_data;
-int n_entries, ret = 0;
+int n_entries;
if (n > 0)
seq_puts(m, "\n\n");
@@ -1073,10 +1117,8 @@ static void hist_trigger_show(struct seq_file *m,
hist_data = data->private_data;
n_entries = print_entries(m, hist_data);
-if (n_entries < 0) {
-ret = n_entries;
+if (n_entries < 0)
n_entries = 0;
-}
seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
(u64)atomic64_read(&hist_data->map->hits),
@@ -1142,7 +1184,9 @@ static const char *get_hist_field_flags(struct hist_field *hist_field)
static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
{
-seq_printf(m, "%s", hist_field->field->name);
+const char *field_name = hist_field_name(hist_field, 0);
+seq_printf(m, "%s", field_name);
if (hist_field->flags) {
const char *flags_str = get_hist_field_flags(hist_field);


@@ -16,6 +16,10 @@
#include "trace.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/preemptirq.h>
+#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;
@@ -462,64 +466,44 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
#else /* !CONFIG_PROVE_LOCKING */
-/*
- * Stubs:
- */
-void trace_softirqs_on(unsigned long ip)
-{
-}
-void trace_softirqs_off(unsigned long ip)
-{
-}
-inline void print_irqtrace_events(struct task_struct *curr)
-{
-}
/*
* We are only interested in hardirq on/off events:
*/
-void trace_hardirqs_on(void)
+static inline void tracer_hardirqs_on(void)
{
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
-EXPORT_SYMBOL(trace_hardirqs_on);
-void trace_hardirqs_off(void)
+static inline void tracer_hardirqs_off(void)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
-EXPORT_SYMBOL(trace_hardirqs_off);
-__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
+static inline void tracer_hardirqs_on_caller(unsigned long caller_addr)
{
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, caller_addr);
}
-EXPORT_SYMBOL(trace_hardirqs_on_caller);
-__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
+static inline void tracer_hardirqs_off_caller(unsigned long caller_addr)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, caller_addr);
}
-EXPORT_SYMBOL(trace_hardirqs_off_caller);
#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */
#ifdef CONFIG_PREEMPT_TRACER
-void trace_preempt_on(unsigned long a0, unsigned long a1)
+static inline void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
if (preempt_trace() && !irq_trace())
stop_critical_timing(a0, a1);
}
-void trace_preempt_off(unsigned long a0, unsigned long a1)
+static inline void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
if (preempt_trace() && !irq_trace())
start_critical_timing(a0, a1);
@@ -781,3 +765,100 @@ __init static int init_irqsoff_tracer(void)
return 0;
}
core_initcall(init_irqsoff_tracer);
+#endif /* IRQSOFF_TRACER || PREEMPTOFF_TRACER */
+#ifndef CONFIG_IRQSOFF_TRACER
+static inline void tracer_hardirqs_on(void) { }
+static inline void tracer_hardirqs_off(void) { }
+static inline void tracer_hardirqs_on_caller(unsigned long caller_addr) { }
+static inline void tracer_hardirqs_off_caller(unsigned long caller_addr) { }
+#endif
+#ifndef CONFIG_PREEMPT_TRACER
+static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
+static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
+#endif
+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
+/* Per-cpu variable to prevent redundant calls when IRQs already off */
+static DEFINE_PER_CPU(int, tracing_irq_cpu);
+void trace_hardirqs_on(void)
+{
+if (!this_cpu_read(tracing_irq_cpu))
+return;
+trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+tracer_hardirqs_on();
+this_cpu_write(tracing_irq_cpu, 0);
+}
+EXPORT_SYMBOL(trace_hardirqs_on);
+void trace_hardirqs_off(void)
+{
+if (this_cpu_read(tracing_irq_cpu))
+return;
+this_cpu_write(tracing_irq_cpu, 1);
+trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+tracer_hardirqs_off();
+}
+EXPORT_SYMBOL(trace_hardirqs_off);
+__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
+{
+if (!this_cpu_read(tracing_irq_cpu))
+return;
+trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
+tracer_hardirqs_on_caller(caller_addr);
+this_cpu_write(tracing_irq_cpu, 0);
+}
+EXPORT_SYMBOL(trace_hardirqs_on_caller);
+__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
+{
+if (this_cpu_read(tracing_irq_cpu))
+return;
+this_cpu_write(tracing_irq_cpu, 1);
+trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
+tracer_hardirqs_off_caller(caller_addr);
+}
+EXPORT_SYMBOL(trace_hardirqs_off_caller);
+/*
+ * Stubs:
+ */
+void trace_softirqs_on(unsigned long ip)
+{
+}
+void trace_softirqs_off(unsigned long ip)
+{
+}
+inline void print_irqtrace_events(struct task_struct *curr)
+{
+}
+#endif
+#if defined(CONFIG_PREEMPT_TRACER) || \
+(defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
+void trace_preempt_on(unsigned long a0, unsigned long a1)
+{
+trace_preempt_enable_rcuidle(a0, a1);
+tracer_preempt_on(a0, a1);
+}
+void trace_preempt_off(unsigned long a0, unsigned long a1)
+{
+trace_preempt_disable_rcuidle(a0, a1);
+tracer_preempt_off(a0, a1);
+}
+#endif


@@ -907,8 +907,8 @@ static int probes_open(struct inode *inode, struct file *file)
static ssize_t probes_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
-return traceprobe_probes_write(file, buffer, count, ppos,
+return trace_parse_run_command(file, buffer, count, ppos,
create_trace_kprobe);
}
static const struct file_operations kprobe_events_ops = {
@@ -1199,7 +1199,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
memset(&entry[1], 0, dsize);
store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
-head, NULL, NULL);
+head, NULL);
}
NOKPROBE_SYMBOL(kprobe_perf_func);
@@ -1234,7 +1234,7 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
entry->ret_ip = (unsigned long)ri->ret_addr;
store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
-head, NULL, NULL);
+head, NULL);
}
NOKPROBE_SYMBOL(kretprobe_perf_func);
#endif /* CONFIG_PERF_EVENTS */
@@ -1431,9 +1431,9 @@ static __init int kprobe_trace_self_tests_init(void)
pr_info("Testing kprobe tracing: ");
-ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
+ret = trace_run_command("p:testprobe kprobe_trace_selftest_target "
"$stack $stack0 +0($stack)",
create_trace_kprobe);
if (WARN_ON_ONCE(ret)) {
pr_warn("error on probing function entry.\n");
warn++;
@@ -1453,8 +1453,8 @@ static __init int kprobe_trace_self_tests_init(void)
}
}
-ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
+ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target "
"$retval", create_trace_kprobe);
if (WARN_ON_ONCE(ret)) {
pr_warn("error on probing function return.\n");
warn++;
@@ -1524,13 +1524,13 @@ static __init int kprobe_trace_self_tests_init(void)
disable_trace_kprobe(tk, file);
}
-ret = traceprobe_command("-:testprobe", create_trace_kprobe);
+ret = trace_run_command("-:testprobe", create_trace_kprobe);
if (WARN_ON_ONCE(ret)) {
pr_warn("error on deleting a probe.\n");
warn++;
}
-ret = traceprobe_command("-:testprobe2", create_trace_kprobe);
+ret = trace_run_command("-:testprobe2", create_trace_kprobe);
if (WARN_ON_ONCE(ret)) {
pr_warn("error on deleting a probe.\n");
warn++;


@@ -623,92 +623,6 @@ void traceprobe_free_probe_arg(struct probe_arg *arg)
kfree(arg->comm);
}
-int traceprobe_command(const char *buf, int (*createfn)(int, char **))
-{
-char **argv;
-int argc, ret;
-argc = 0;
-ret = 0;
-argv = argv_split(GFP_KERNEL, buf, &argc);
-if (!argv)
-return -ENOMEM;
-if (argc)
-ret = createfn(argc, argv);
-argv_free(argv);
-return ret;
-}
-#define WRITE_BUFSIZE 4096
-ssize_t traceprobe_probes_write(struct file *file, const char __user *buffer,
-size_t count, loff_t *ppos,
-int (*createfn)(int, char **))
-{
-char *kbuf, *buf, *tmp;
-int ret = 0;
-size_t done = 0;
-size_t size;
-kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
-if (!kbuf)
-return -ENOMEM;
-while (done < count) {
-size = count - done;
-if (size >= WRITE_BUFSIZE)
-size = WRITE_BUFSIZE - 1;
-if (copy_from_user(kbuf, buffer + done, size)) {
-ret = -EFAULT;
-goto out;
-}
-kbuf[size] = '\0';
-buf = kbuf;
-do {
-tmp = strchr(buf, '\n');
-if (tmp) {
-*tmp = '\0';
-size = tmp - buf + 1;
-} else {
-size = strlen(buf);
-if (done + size < count) {
-if (buf != kbuf)
-break;
-/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
-pr_warn("Line length is too long: Should be less than %d\n",
-WRITE_BUFSIZE - 2);
-ret = -EINVAL;
-goto out;
-}
-}
-done += size;
-/* Remove comments */
-tmp = strchr(buf, '#');
-if (tmp)
-*tmp = '\0';
-ret = traceprobe_command(buf, createfn);
-if (ret)
-goto out;
-buf += size;
-} while (done < count);
-}
-ret = done;
-out:
-kfree(kbuf);
-return ret;
-}
static int __set_print_fmt(struct trace_probe *tp, char *buf, int len,
bool is_return)
{


@@ -42,7 +42,6 @@
#define MAX_TRACE_ARGS 128
#define MAX_ARGSTR_LEN 63
-#define MAX_EVENT_NAME_LEN 64
#define MAX_STRING_SIZE PATH_MAX
/* Reserved field names */
@@ -356,12 +355,6 @@ extern void traceprobe_free_probe_arg(struct probe_arg *arg);
extern int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset);
-extern ssize_t traceprobe_probes_write(struct file *file,
-const char __user *buffer, size_t count, loff_t *ppos,
-int (*createfn)(int, char**));
-extern int traceprobe_command(const char *buf, int (*createfn)(int, char**));
/* Sum up total data length for dynamic arraies (strings) */
static nokprobe_inline int
__get_data_size(struct trace_probe *tp, struct pt_regs *regs)


@@ -60,7 +60,7 @@ static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
* Test the trace buffer to see if all the elements
* are still sane.
*/
-static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
+static int __maybe_unused trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
{
unsigned long flags, cnt = 0;
int cpu, ret = 0;
@@ -1151,38 +1151,6 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
}
#endif /* CONFIG_SCHED_TRACER */
-#ifdef CONFIG_CONTEXT_SWITCH_TRACER
-int
-trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
-{
-unsigned long count;
-int ret;
-/* start the tracing */
-ret = tracer_init(trace, tr);
-if (ret) {
-warn_failed_init_tracer(trace, ret);
-return ret;
-}
-/* Sleep for a 1/10 of a second */
-msleep(100);
-/* stop the tracing. */
-tracing_stop();
-/* check the trace buffer */
-ret = trace_test_buffer(&tr->trace_buffer, &count);
-trace->reset(tr);
-tracing_start();
-if (!ret && !count) {
-printk(KERN_CONT ".. no entries found ..");
-ret = -1;
-}
-return ret;
-}
-#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)


@@ -625,7 +625,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
perf_trace_buf_submit(rec, size, rctx,
sys_data->enter_event->event.type, 1, regs,
-head, NULL, NULL);
+head, NULL);
}
static int perf_sysenter_enable(struct trace_event_call *call)
@@ -721,7 +721,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
}
perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type,
-1, regs, head, NULL, NULL);
+1, regs, head, NULL);
}
static int perf_sysexit_enable(struct trace_event_call *call)


@@ -651,7 +651,7 @@ static int probes_open(struct inode *inode, struct file *file)
static ssize_t probes_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
-return traceprobe_probes_write(file, buffer, count, ppos, create_trace_uprobe);
+return trace_parse_run_command(file, buffer, count, ppos, create_trace_uprobe);
}
static const struct file_operations uprobe_events_ops = {
@@ -1155,7 +1155,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
}
perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
-head, NULL, NULL);
+head, NULL);
out:
preempt_enable();
}


@@ -428,7 +428,8 @@ __tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
if (test_key && test_key == key_hash && entry->val &&
keys_match(key, entry->val->key, map->key_size)) {
-atomic64_inc(&map->hits);
+if (!lookup_only)
+atomic64_inc(&map->hits);
return entry->val;
}


@@ -6,7 +6,7 @@
#define TRACING_MAP_BITS_MAX 17
#define TRACING_MAP_BITS_MIN 7
-#define TRACING_MAP_KEYS_MAX 2
+#define TRACING_MAP_KEYS_MAX 3
#define TRACING_MAP_VALS_MAX 3
#define TRACING_MAP_FIELDS_MAX (TRACING_MAP_KEYS_MAX + \
TRACING_MAP_VALS_MAX)