commit 0424378781
Merge tag 'trace-v4.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing fixes from Steven Rostedt:
 "Various fix-ups:

   - comment fixes
   - build fix
   - better memory allocation (don't use NR_CPUS)
   - configuration fix
   - build warning fix
   - enhanced callback parameter (to simplify users of trace hooks)
   - give up on stack tracing when RCU isn't watching (it's a lost cause)"

* tag 'trace-v4.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Have stack trace not record if RCU is not watching
  tracing: Pass export pointer as argument to ->write()
  ring-buffer: Remove unused function __rb_data_page_index()
  tracing: make PREEMPTIRQ_EVENTS depend on TRACING
  tracing: Allocate mask_str buffer dynamically
  tracing: always define trace_{irq,preempt}_{enable_disable}
  tracing: Fix code comments in trace.c
drivers/hwtracing/stm/ftrace.c
@@ -42,9 +42,11 @@ static struct stm_ftrace {
  * @len:	length of the data packet
  */
 static void notrace
-stm_ftrace_write(const void *buf, unsigned int len)
+stm_ftrace_write(struct trace_export *export, const void *buf, unsigned int len)
 {
-	stm_source_write(&stm_ftrace.data, STM_FTRACE_CHAN, buf, len);
+	struct stm_ftrace *stm = container_of(export, struct stm_ftrace, ftrace);
+
+	stm_source_write(&stm->data, STM_FTRACE_CHAN, buf, len);
 }
 
 static int stm_ftrace_link(struct stm_source_data *data)
include/linux/trace.h
@@ -18,7 +18,7 @@
  */
 struct trace_export {
 	struct trace_export __rcu	*next;
-	void (*write)(const void *, unsigned int);
+	void (*write)(struct trace_export *, const void *, unsigned int);
 };
 
 int register_ftrace_export(struct trace_export *export);
include/trace/events/preemptirq.h
@@ -56,15 +56,18 @@ DEFINE_EVENT(preemptirq_template, preempt_enable,
 
 #include <trace/define_trace.h>
 
-#else /* !CONFIG_PREEMPTIRQ_EVENTS */
+#endif /* !CONFIG_PREEMPTIRQ_EVENTS */
 
+#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || defined(CONFIG_PROVE_LOCKING)
 #define trace_irq_enable(...)
 #define trace_irq_disable(...)
-#define trace_preempt_enable(...)
-#define trace_preempt_disable(...)
 #define trace_irq_enable_rcuidle(...)
 #define trace_irq_disable_rcuidle(...)
+#endif
+
+#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || !defined(CONFIG_DEBUG_PREEMPT)
+#define trace_preempt_enable(...)
+#define trace_preempt_disable(...)
 #define trace_preempt_enable_rcuidle(...)
 #define trace_preempt_disable_rcuidle(...)
-
 #endif
kernel/trace/Kconfig
@@ -164,6 +164,7 @@ config PREEMPTIRQ_EVENTS
 	bool "Enable trace events for preempt and irq disable/enable"
 	select TRACE_IRQFLAGS
 	depends on DEBUG_PREEMPT || !PROVE_LOCKING
+	depends on TRACING
 	default n
 	help
 	  Enable tracing of disable and enable events for preemption and irqs.
kernel/trace/ring_buffer.c
@@ -1799,12 +1799,6 @@ void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
 
-static __always_inline void *
-__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
-{
-	return bpage->data + index;
-}
-
 static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
 {
 	return bpage->page->data + index;
kernel/trace/trace.c
@@ -362,7 +362,7 @@ trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct
 }
 
 /**
- * trace_pid_filter_add_remove - Add or remove a task from a pid_list
+ * trace_pid_filter_add_remove_task - Add or remove a task from a pid_list
  * @pid_list: The list to modify
  * @self: The current task for fork or NULL for exit
  * @task: The task to add or remove
@@ -925,7 +925,7 @@ static void tracing_snapshot_instance(struct trace_array *tr)
 }
 
 /**
- * trace_snapshot - take a snapshot of the current buffer.
+ * tracing_snapshot - take a snapshot of the current buffer.
  *
  * This causes a swap between the snapshot buffer and the current live
  * tracing buffer. You can use this to take snapshots of the live
@@ -1004,9 +1004,9 @@ int tracing_alloc_snapshot(void)
 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
 
 /**
- * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
+ * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
  *
- * This is similar to trace_snapshot(), but it will allocate the
+ * This is similar to tracing_snapshot(), but it will allocate the
  * snapshot buffer if it isn't already allocated. Use this only
  * where it is safe to sleep, as the allocation may sleep.
  *
@@ -1303,7 +1303,7 @@ unsigned long __read_mostly tracing_thresh;
 /*
  * Copy the new maximum trace into the separate maximum-trace
  * structure. (this way the maximum trace is permanently saved,
- * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
+ * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
  */
 static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
@@ -2415,7 +2415,7 @@ trace_process_export(struct trace_export *export,
 
 	entry = ring_buffer_event_data(event);
 	size = ring_buffer_event_length(event);
-	export->write(entry, size);
+	export->write(export, entry, size);
 }
 
 static DEFINE_MUTEX(ftrace_export_lock);
@@ -4178,37 +4178,30 @@ static const struct file_operations show_traces_fops = {
 	.llseek		= seq_lseek,
 };
 
-/*
- * The tracer itself will not take this lock, but still we want
- * to provide a consistent cpumask to user-space:
- */
-static DEFINE_MUTEX(tracing_cpumask_update_lock);
-
-/*
- * Temporary storage for the character representation of the
- * CPU bitmask (and one more byte for the newline):
- */
-static char mask_str[NR_CPUS + 1];
-
 static ssize_t
 tracing_cpumask_read(struct file *filp, char __user *ubuf,
 		     size_t count, loff_t *ppos)
 {
 	struct trace_array *tr = file_inode(filp)->i_private;
+	char *mask_str;
 	int len;
 
-	mutex_lock(&tracing_cpumask_update_lock);
+	len = snprintf(NULL, 0, "%*pb\n",
+		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
+	mask_str = kmalloc(len, GFP_KERNEL);
+	if (!mask_str)
+		return -ENOMEM;
 
-	len = snprintf(mask_str, count, "%*pb\n",
+	len = snprintf(mask_str, len, "%*pb\n",
 		       cpumask_pr_args(tr->tracing_cpumask));
 	if (len >= count) {
 		count = -EINVAL;
 		goto out_err;
 	}
-	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
+	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
 
 out_err:
-	mutex_unlock(&tracing_cpumask_update_lock);
+	kfree(mask_str);
 
 	return count;
 }
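The read side now sizes its buffer with the standard two-pass snprintf() idiom: a first call with a NULL buffer and zero size only counts the characters the output would need, and the buffer is then allocated to exactly that length, replacing the fixed NR_CPUS-sized array. The same idiom in a self-contained userspace form (hypothetical helper, malloc standing in for kmalloc):

	#include <stdio.h>
	#include <stdlib.h>

	static char *format_alloc(int value)
	{
		/* pass 1: NULL/0 makes snprintf only count characters */
		int len = snprintf(NULL, 0, "value=%d\n", value) + 1; /* +1 for NUL */
		char *buf = malloc(len);

		if (!buf)
			return NULL;
		/* pass 2: format for real into the exactly-sized buffer */
		snprintf(buf, len, "value=%d\n", value);
		return buf;
	}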
@@ -4228,8 +4221,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	if (err)
 		goto err_unlock;
 
-	mutex_lock(&tracing_cpumask_update_lock);
-
 	local_irq_disable();
 	arch_spin_lock(&tr->max_lock);
 	for_each_tracing_cpu(cpu) {
@@ -4252,8 +4243,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	local_irq_enable();
 
 	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
-
-	mutex_unlock(&tracing_cpumask_update_lock);
 	free_cpumask_var(tracing_cpumask_new);
 
 	return count;
kernel/trace/trace_stack.c
@@ -209,6 +209,10 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	if (__this_cpu_read(disable_stack_tracer) != 1)
 		goto out;
 
+	/* If rcu is not watching, then save stack trace can fail */
+	if (!rcu_is_watching())
+		goto out;
+
 	ip += MCOUNT_INSN_SIZE;
 
 	check_stack(ip, &stack);