commit d786f0fe5e
Merge tag 'trace-v6.2-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace

Tracing updates for 6.2

Pull tracing fixes from Steven Rostedt:

 - Fix filter memory leak by calling ftrace_free_filter()

 - Initialize trace_printk() earlier so that ftrace_dump_on_oops shows data
   on early crashes.

 - Update the outdated instructions in scripts/tracing/ftrace-bisect.sh

 - Add lockdep_is_held() to fix lockdep warning

 - Add allocation failure check in create_hist_field()

 - Don't initialize pointer that gets set right away in enabled_monitors_write()

 - Update MAINTAINERS entries

 - Fix help messages in Kconfigs

 - Fix kernel-doc header for update_preds()

* tag 'trace-v6.2-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
  bootconfig: Update MAINTAINERS file to add tree and mailing list
  rv: remove redundant initialization of pointer ptr
  ftrace: Maintain samples/ftrace
  tracing/filter: fix kernel-doc warnings
  lib: Kconfig: fix spellos
  trace_events_hist: add check for return value of 'create_hist_field'
  tracing/osnoise: Use built-in RCU list checking
  tracing: Kconfig: Fix spelling/grammar/punctuation
  ftrace/scripts: Update the instructions for ftrace-bisect.sh
  tracing: Make sure trace_printk() can output as soon as it can be used
  ftrace: Export ftrace_free_filter() to modules
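The headline change is exporting ftrace_free_filter() to modules. As a reading aid, here is a minimal, hypothetical module sketch (my_ops, my_callback and the "schedule" filter pattern are illustrative, not taken from this merge) of the lifecycle the export makes possible for out-of-tree users: set a filter, register the ops, and release the filter hashes again on the error path and at module exit.

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/string.h>

/* Called on entry of every function matched by the filter set below. */
static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
                                struct ftrace_ops *op, struct ftrace_regs *fregs)
{
}

static struct ftrace_ops my_ops = {
        .func = my_callback,
};

static int __init my_init(void)
{
        int ret;

        /* Allocates filter hash entries tied to my_ops. */
        ret = ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 1);
        if (ret)
                return ret;

        ret = register_ftrace_function(&my_ops);
        if (ret)
                ftrace_free_filter(&my_ops);    /* don't leak the hash on error */
        return ret;
}

static void __exit my_exit(void)
{
        unregister_ftrace_function(&my_ops);
        ftrace_free_filter(&my_ops);            /* possible from a module now that the symbol is exported */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");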
@@ -7893,7 +7893,11 @@ F: include/linux/extcon/
 
 EXTRA BOOT CONFIG
 M: Masami Hiramatsu <mhiramat@kernel.org>
+L: linux-kernel@vger.kernel.org
+L: linux-trace-kernel@vger.kernel.org
+Q: https://patchwork.kernel.org/project/linux-trace-kernel/list/
 S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace.git
 F: Documentation/admin-guide/bootconfig.rst
 F: fs/proc/bootconfig.c
 F: include/linux/bootconfig.h

@@ -8569,6 +8573,7 @@ F: kernel/trace/fgraph.c
 F: arch/*/*/*/*ftrace*
 F: arch/*/*/*ftrace*
 F: include/*/ftrace.h
+F: samples/ftrace
 
 FUNGIBLE ETHERNET DRIVERS
 M: Dimitris Michailidis <dmichail@fungible.com>

@@ -933,8 +933,8 @@ config RING_BUFFER_RECORD_RECURSION
         default y
         help
           The ring buffer has its own internal recursion. Although when
-          recursion happens it wont cause harm because of the protection,
-          but it does cause an unwanted overhead. Enabling this option will
+          recursion happens it won't cause harm because of the protection,
+          but it does cause unwanted overhead. Enabling this option will
           place where recursion was detected into the ftrace "recursed_functions"
           file.
 

@@ -1017,8 +1017,8 @@ config RING_BUFFER_STARTUP_TEST
           The test runs for 10 seconds. This will slow your boot time
           by at least 10 more seconds.
 
-          At the end of the test, statics and more checks are done.
-          It will output the stats of each per cpu buffer. What
+          At the end of the test, statistics and more checks are done.
+          It will output the stats of each per cpu buffer: What
           was written, the sizes, what was read, what was lost, and
           other similar details.
 
@@ -1248,12 +1248,17 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
         call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
 }
 
+/**
+ * ftrace_free_filter - remove all filters for an ftrace_ops
+ * @ops - the ops to remove the filters from
+ */
 void ftrace_free_filter(struct ftrace_ops *ops)
 {
         ftrace_ops_init(ops);
         free_ftrace_hash(ops->func_hash->filter_hash);
         free_ftrace_hash(ops->func_hash->notrace_hash);
 }
+EXPORT_SYMBOL_GPL(ftrace_free_filter);
 
 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
 {

@@ -5839,6 +5844,10 @@ EXPORT_SYMBOL_GPL(modify_ftrace_direct_multi);
  *
  * Filters denote which functions should be enabled when tracing is enabled
  * If @ip is NULL, it fails to update filter.
+ *
+ * This can allocate memory which must be freed before @ops can be freed,
+ * either by removing each filtered addr or by using
+ * ftrace_free_filter(@ops).
  */
 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
                          int remove, int reset)

@@ -5858,7 +5867,11 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
  *
  * Filters denote which functions should be enabled when tracing is enabled
  * If @ips array or any ip specified within is NULL , it fails to update filter.
- */
+ *
+ * This can allocate memory which must be freed before @ops can be freed,
+ * either by removing each filtered addr or by using
+ * ftrace_free_filter(@ops).
+ */
 int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
                           unsigned int cnt, int remove, int reset)
 {

@@ -5900,6 +5913,10 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
  *
  * Filters denote which functions should be enabled when tracing is enabled.
  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
+ *
+ * This can allocate memory which must be freed before @ops can be freed,
+ * either by removing each filtered addr or by using
+ * ftrace_free_filter(@ops).
  */
 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
                       int len, int reset)

@@ -5919,6 +5936,10 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter);
  * Notrace Filters denote which functions should not be enabled when tracing
  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
  * for tracing.
+ *
+ * This can allocate memory which must be freed before @ops can be freed,
+ * either by removing each filtered addr or by using
+ * ftrace_free_filter(@ops).
  */
 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
                        int len, int reset)
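The kernel-doc added above states the ownership rule for all of these setters: they may allocate hash memory tied to @ops, and that memory must be released before @ops itself is freed, either by removing each filtered address again or by calling ftrace_free_filter(). A small hedged sketch of those two release paths (set_and_release and target_fn are illustrative names, not part of the patch):

#include <linux/ftrace.h>

int set_and_release(struct ftrace_ops *my_ops, void *target_fn)
{
        unsigned long ip = (unsigned long)target_fn;
        int ret;

        ret = ftrace_set_filter_ip(my_ops, ip, 0, 0);   /* add @ip, keep existing entries */
        if (ret)
                return ret;

        /* Path 1: remove the filtered address again, one call per ip ... */
        ret = ftrace_set_filter_ip(my_ops, ip, 1, 0);

        /* ... or Path 2: drop the filter and notrace hashes in one call. */
        ftrace_free_filter(my_ops);

        return ret;
}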
@@ -516,7 +516,7 @@ static ssize_t enabled_monitors_write(struct file *filp, const char __user *user
         struct rv_monitor_def *mdef;
         int retval = -EINVAL;
         bool enable = true;
-        char *ptr = buff;
+        char *ptr;
         int len;
 
         if (count < 1 || count > MAX_RV_MONITOR_NAME_SIZE + 1)
@@ -10295,6 +10295,8 @@ void __init early_trace_init(void)
                 static_key_enable(&tracepoint_printk_key.key);
         }
         tracer_alloc_buffers();
+
+        init_events();
 }
 
 void __init trace_init(void)

@@ -1490,6 +1490,7 @@ extern void trace_event_enable_cmd_record(bool enable);
 extern void trace_event_enable_tgid_record(bool enable);
 
 extern int event_trace_init(void);
+extern int init_events(void);
 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
 extern int event_trace_del_tracer(struct trace_array *tr);
 extern void __trace_early_add_events(struct trace_array *tr);
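With init_events() now called from early_trace_init() (and its declaration added to the header so the core tracer can see it), trace_printk() output can be rendered as soon as the early ring buffers exist, which is what lets ftrace_dump_on_oops show data for crashes that happen before the rest of tracing comes up. A hypothetical early-boot user might look like this (my_early_setup is an illustrative name, not from the patch):

#include <linux/kernel.h>
#include <linux/init.h>

static void __init my_early_setup(void)
{
        /* Lands in the ftrace ring buffer; with this merge it can be
         * dumped by ftrace_dump_on_oops even on a very early crash. */
        trace_printk("early setup reached\n");
}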
@@ -128,7 +128,7 @@ static bool is_not(const char *str)
 }
 
 /**
- * prog_entry - a singe entry in the filter program
+ * struct prog_entry - a singe entry in the filter program
  * @target: Index to jump to on a branch (actually one minus the index)
  * @when_to_branch: The value of the result of the predicate to do a branch
  * @pred: The predicate to execute.

@@ -140,16 +140,16 @@ struct prog_entry {
 };
 
 /**
- * update_preds- assign a program entry a label target
+ * update_preds - assign a program entry a label target
  * @prog: The program array
  * @N: The index of the current entry in @prog
- * @when_to_branch: What to assign a program entry for its branch condition
+ * @invert: What to assign a program entry for its branch condition
  *
  * The program entry at @N has a target that points to the index of a program
  * entry that can have its target and when_to_branch fields updated.
  * Update the current program entry denoted by index @N target field to be
  * that of the updated entry. This will denote the entry to update if
- * we are processing an "||" after an "&&"
+ * we are processing an "||" after an "&&".
  */
 static void update_preds(struct prog_entry *prog, int N, int invert)
 {
@@ -1988,6 +1988,8 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
                 hist_field->fn_num = flags & HIST_FIELD_FL_LOG2 ? HIST_FIELD_FN_LOG2 :
                         HIST_FIELD_FN_BUCKET;
                 hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
+                if (!hist_field->operands[0])
+                        goto free;
                 hist_field->size = hist_field->operands[0]->size;
                 hist_field->type = kstrdup_const(hist_field->operands[0]->type, GFP_KERNEL);
                 if (!hist_field->type)
@@ -147,9 +147,8 @@ static void osnoise_unregister_instance(struct trace_array *tr)
          * register/unregister serialization is provided by trace's
          * trace_types_lock.
          */
-        lockdep_assert_held(&trace_types_lock);
-
-        list_for_each_entry_rcu(inst, &osnoise_instances, list) {
+        list_for_each_entry_rcu(inst, &osnoise_instances, list,
+                                lockdep_is_held(&trace_types_lock)) {
                 if (inst->tr == tr) {
                         list_del_rcu(&inst->list);
                         found = 1;
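The osnoise change above replaces the unannotated RCU list walk with the four-argument form of list_for_each_entry_rcu(), whose optional cond argument tells the built-in RCU list checking that holding trace_types_lock (rather than rcu_read_lock()) makes the traversal safe, so lockdep no longer warns. The same idiom in a generic, hedged sketch (my_lock, my_list and struct my_item are illustrative names, not from the patched file):

#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/lockdep.h>

struct my_item {
        struct list_head list;
        int id;
};

static LIST_HEAD(my_list);
static DEFINE_MUTEX(my_lock);

/* Caller holds my_lock; updaters also hold it, so no rcu_read_lock() is needed. */
static struct my_item *find_item(int id)
{
        struct my_item *item;

        list_for_each_entry_rcu(item, &my_list, list,
                                lockdep_is_held(&my_lock)) {
                if (item->id == id)
                        return item;
        }
        return NULL;
}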
@@ -1535,7 +1535,7 @@ static struct trace_event *events[] __initdata = {
         NULL
 };
 
-__init static int init_events(void)
+__init int init_events(void)
 {
         struct trace_event *event;
         int i, ret;

@@ -1548,4 +1548,3 @@ __init static int init_events(void)
 
         return 0;
 }
-early_initcall(init_events);
@@ -1917,7 +1917,7 @@ config FUNCTION_ERROR_INJECTION
         help
           Add fault injections into various functions that are annotated with
           ALLOW_ERROR_INJECTION() in the kernel. BPF may also modify the return
-          value of theses functions. This is useful to test error paths of code.
+          value of these functions. This is useful to test error paths of code.
 
           If unsure, say N
 

@@ -194,7 +194,7 @@ config KCSAN_WEAK_MEMORY
           Enable support for modeling a subset of weak memory, which allows
           detecting a subset of data races due to missing memory barriers.
 
-          Depends on KCSAN_STRICT, because the options strenghtening certain
+          Depends on KCSAN_STRICT, because the options strengthening certain
           plain accesses by default (depending on !KCSAN_STRICT) reduce the
           ability to detect any data races invoving reordered accesses, in
           particular reordered writes.
@@ -152,6 +152,7 @@ static void __exit ftrace_direct_multi_exit(void)
 {
         kthread_stop(simple_tsk);
         unregister_ftrace_direct_multi(&direct, my_tramp);
+        ftrace_free_filter(&direct);
 }
 
 module_init(ftrace_direct_multi_init);

@@ -79,6 +79,7 @@ static int __init ftrace_direct_multi_init(void)
 static void __exit ftrace_direct_multi_exit(void)
 {
         unregister_ftrace_direct_multi(&direct, (unsigned long) my_tramp);
+        ftrace_free_filter(&direct);
 }
 
 module_init(ftrace_direct_multi_init);
@@ -12,7 +12,7 @@
 # (note, if this is a problem with function_graph tracing, then simply
 # replace "function" with "function_graph" in the following steps).
 #
-# # cd /sys/kernel/debug/tracing
+# # cd /sys/kernel/tracing
 # # echo schedule > set_ftrace_filter
 # # echo function > current_tracer
 #

@@ -20,22 +20,40 @@
 #
 # # echo nop > current_tracer
 #
-# # cat available_filter_functions > ~/full-file
+# Starting with v5.1 this can be done with numbers, making it much faster:
+#
+# The old (slow) way, for kernels before v5.1.
+#
+# [old-way] # cat available_filter_functions > ~/full-file
+#
+# [old-way] *** Note *** this process will take several minutes to update the
+# [old-way] filters. Setting multiple functions is an O(n^2) operation, and we
+# [old-way] are dealing with thousands of functions. So go have coffee, talk
+# [old-way] with your coworkers, read facebook. And eventually, this operation
+# [old-way] will end.
+#
+# The new way (using numbers) is an O(n) operation, and usually takes less than a second.
+#
+# seq `wc -l available_filter_functions | cut -d' ' -f1` > ~/full-file
+#
+# This will create a sequence of numbers that match the functions in
+# available_filter_functions, and when echoing in a number into the
+# set_ftrace_filter file, it will enable the corresponding function in
+# O(1) time. Making enabling all functions O(n) where n is the number of
+# functions to enable.
+#
+# For either the new or old way, the rest of the operations remain the same.
+#
 # # ftrace-bisect ~/full-file ~/test-file ~/non-test-file
 # # cat ~/test-file > set_ftrace_filter
 #
-# *** Note *** this will take several minutes. Setting multiple functions is
-# an O(n^2) operation, and we are dealing with thousands of functions. So go
-# have coffee, talk with your coworkers, read facebook. And eventually, this
-# operation will end.
-#
 # # echo function > current_tracer
 #
 # If it crashes, we know that ~/test-file has a bad function.
 #
 # Reboot back to test kernel.
 #
-# # cd /sys/kernel/debug/tracing
+# # cd /sys/kernel/tracing
 # # mv ~/test-file ~/full-file
 #
 # If it didn't crash.