Merge tag 'trace-v5.5-rc6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing fixes from Steven Rostedt:
 "Various tracing fixes:

  - Fix a function comparison warning for a xen trace event macro

  - Fix a double perf_event linking to a trace_uprobe_filter for
    multiple events

  - Fix suspicious RCU warnings in trace event code for using
    list_for_each_entry_rcu() when the "_rcu" portion wasn't needed

  - Fix a bug in the histogram code when using the same variable

  - Fix a NULL pointer dereference when tracefs lockdown is enabled and
    trace_set_default_clock() is called

  This v2 version contains:

  - A fix to a bug found with the double perf_event linking patch"

-----BEGIN PGP SIGNATURE-----

iIoEABYIADIWIQRRSw7ePDh/lE+zeZMp5XQQmuv6qgUCXinakBQccm9zdGVkdEBn
b29kbWlzLm9yZwAKCRAp5XQQmuv6qhNZAQCi86p9eW3f3w7hM2hZcirC+mQKVZgp
2rO4zIAK5V6G7gEAh6I7VZa50a6AE647ZjryE7ufTRUhmSFMWoG0kcJ7OAk=
=/J9n
-----END PGP SIGNATURE-----

* tag 'trace-v5.5-rc6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing/uprobe: Fix to make trace_uprobe_filter alignment safe
  tracing: Do not set trace clock if tracefs lockdown is in effect
  tracing: Fix histogram code when expression has same var as value
  tracing: trigger: Replace unneeded RCU-list traversals
  tracing/uprobe: Fix double perf_event linking on multiprobe uprobe
  tracing: xen: Ordered comparison of function pointers
commit 34597c85be
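A note on the RCU-list changes that dominate the diffs below. The trigger list (file->triggers) is only ever traversed with event_mutex held on these paths, so the "_rcu" traversal was unnecessary and produced the suspicious-RCU-usage splats. The two idioms the patches choose between look roughly like this (an illustrative kernel-style sketch, not taken from the patches; my_trigger, my_list and my_mutex are made-up names):

    #include <linux/rculist.h>
    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    struct my_trigger {
    	struct list_head list;
    };

    static LIST_HEAD(my_list);
    static DEFINE_MUTEX(my_mutex);

    /* Paths that always run under the mutex: the list cannot change here,
     * so a plain traversal plus an assertion is correct and avoids the
     * false-positive RCU-lockdep splat that the _rcu variant produced. */
    static void walk_locked(void)
    {
    	struct my_trigger *t;

    	lockdep_assert_held(&my_mutex);
    	list_for_each_entry(t, &my_list, list)
    		;	/* examine t */
    }

    /* Paths reachable both from mutex holders and from real RCU readers
     * keep the _rcu variant but pass the alternate protection to
     * RCU-lockdep, as the hist_enable_trigger() hunk below does. */
    static void walk_either(void)
    {
    	struct my_trigger *t;

    	list_for_each_entry_rcu(t, &my_list, list,
    				lockdep_is_held(&my_mutex))
    		;	/* examine t */
    }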
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -66,7 +66,11 @@ TRACE_EVENT(xen_mc_callback,
 	    TP_PROTO(xen_mc_callback_fn_t fn, void *data),
 	    TP_ARGS(fn, data),
 	    TP_STRUCT__entry(
-		    __field(xen_mc_callback_fn_t, fn)
+		    /*
+		     * Use field_struct to avoid is_signed_type()
+		     * comparison of a function pointer.
+		     */
+		    __field_struct(xen_mc_callback_fn_t, fn)
 		    __field(void *, data)
 	    ),
 	    TP_fast_assign(
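Background for the xen.h hunk above, as a hedged sketch from memory of the tracing internals: __field(type, item) ends up evaluating something like the kernel's is_signed_type() macro on the field type, and for a function-pointer type that expands into an ordered comparison of function pointers, which ISO C does not define and gcc -Wextra reports. __field_struct() records the value without the signedness probe:

    /* Approximation of the signedness macro applied per __field(): */
    #define is_signed_type(type)	(((type)(-1)) < (type)1)

    typedef void (*xen_mc_callback_fn_t)(void *);

    int example(void)
    {
    	int ok = is_signed_type(long);	/* 1: signed, well defined */
    	int warns = is_signed_type(xen_mc_callback_fn_t);
    	/* ^ ordered '<' between two function pointers: -Wextra warns */
    	return ok + warns;
    }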
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -9420,6 +9420,11 @@ __init static int tracing_set_default_clock(void)
 {
 	/* sched_clock_stable() is determined in late_initcall */
 	if (!trace_boot_clock && !sched_clock_stable()) {
+		if (security_locked_down(LOCKDOWN_TRACEFS)) {
+			pr_warn("Can not set tracing clock due to lockdown\n");
+			return -EPERM;
+		}
+
 		printk(KERN_WARNING
 		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
 		       "If you want to keep using the local clock, then add:\n"
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -116,6 +116,7 @@ struct hist_field {
 	struct ftrace_event_field	*field;
 	unsigned long			flags;
 	hist_field_fn_t			fn;
+	unsigned int			ref;
 	unsigned int			size;
 	unsigned int			offset;
 	unsigned int			is_signed;
@@ -1766,11 +1767,13 @@ static struct hist_field *find_var(struct hist_trigger_data *hist_data,
 	struct event_trigger_data *test;
 	struct hist_field *hist_field;
 
+	lockdep_assert_held(&event_mutex);
+
 	hist_field = find_var_field(hist_data, var_name);
 	if (hist_field)
 		return hist_field;
 
-	list_for_each_entry_rcu(test, &file->triggers, list) {
+	list_for_each_entry(test, &file->triggers, list) {
 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
 			test_data = test->private_data;
 			hist_field = find_var_field(test_data, var_name);
@@ -1820,7 +1823,9 @@ static struct hist_field *find_file_var(struct trace_event_file *file,
 	struct event_trigger_data *test;
 	struct hist_field *hist_field;
 
-	list_for_each_entry_rcu(test, &file->triggers, list) {
+	lockdep_assert_held(&event_mutex);
+
+	list_for_each_entry(test, &file->triggers, list) {
 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
 			test_data = test->private_data;
 			hist_field = find_var_field(test_data, var_name);
@@ -2423,8 +2428,16 @@ static int contains_operator(char *str)
 	return field_op;
 }
 
+static void get_hist_field(struct hist_field *hist_field)
+{
+	hist_field->ref++;
+}
+
 static void __destroy_hist_field(struct hist_field *hist_field)
 {
+	if (--hist_field->ref > 1)
+		return;
+
 	kfree(hist_field->var.name);
 	kfree(hist_field->name);
 	kfree(hist_field->type);
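The two helpers added above are plain reference counting, so that the create_var_ref() hunk below can hand back an existing variable reference instead of allocating a duplicate when an expression uses the same variable as a value. As a standalone model (made-up names, not the kernel code itself):

    #include <stdlib.h>

    struct ref_obj {
    	unsigned int ref;
    	char *name;
    };

    static struct ref_obj *get_obj(struct ref_obj *obj)
    {
    	obj->ref++;		/* later lookups share the same object */
    	return obj;
    }

    static void put_obj(struct ref_obj *obj)
    {
    	if (--obj->ref > 0)	/* still referenced elsewhere: keep it */
    		return;
    	free(obj->name);
    	free(obj);
    }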
@@ -2466,6 +2479,8 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
 	if (!hist_field)
 		return NULL;
 
+	hist_field->ref = 1;
+
 	hist_field->hist_data = hist_data;
 
 	if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
@@ -2661,6 +2676,17 @@ static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
 {
 	unsigned long flags = HIST_FIELD_FL_VAR_REF;
 	struct hist_field *ref_field;
+	int i;
+
+	/* Check if the variable already exists */
+	for (i = 0; i < hist_data->n_var_refs; i++) {
+		ref_field = hist_data->var_refs[i];
+		if (ref_field->var.idx == var_field->var.idx &&
+		    ref_field->var.hist_data == var_field->hist_data) {
+			get_hist_field(ref_field);
+			return ref_field;
+		}
+	}
 
 	ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
 	if (ref_field) {
@@ -3115,7 +3141,9 @@ static char *find_trigger_filter(struct hist_trigger_data *hist_data,
 {
 	struct event_trigger_data *test;
 
-	list_for_each_entry_rcu(test, &file->triggers, list) {
+	lockdep_assert_held(&event_mutex);
+
+	list_for_each_entry(test, &file->triggers, list) {
 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
 			if (test->private_data == hist_data)
 				return test->filter_str;
@@ -3166,9 +3194,11 @@ find_compatible_hist(struct hist_trigger_data *target_hist_data,
 	struct event_trigger_data *test;
 	unsigned int n_keys;
 
+	lockdep_assert_held(&event_mutex);
+
 	n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
 
-	list_for_each_entry_rcu(test, &file->triggers, list) {
+	list_for_each_entry(test, &file->triggers, list) {
 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
 			hist_data = test->private_data;
 
@@ -5528,7 +5558,7 @@ static int hist_show(struct seq_file *m, void *v)
 		goto out_unlock;
 	}
 
-	list_for_each_entry_rcu(data, &event_file->triggers, list) {
+	list_for_each_entry(data, &event_file->triggers, list) {
 		if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
 			hist_trigger_show(m, data, n++);
 	}
@@ -5921,7 +5951,9 @@ static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
 	if (hist_data->attrs->name && !named_data)
 		goto new;
 
-	list_for_each_entry_rcu(test, &file->triggers, list) {
+	lockdep_assert_held(&event_mutex);
+
+	list_for_each_entry(test, &file->triggers, list) {
 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
 			if (!hist_trigger_match(data, test, named_data, false))
 				continue;
@@ -6005,10 +6037,12 @@ static bool have_hist_trigger_match(struct event_trigger_data *data,
 	struct event_trigger_data *test, *named_data = NULL;
 	bool match = false;
 
+	lockdep_assert_held(&event_mutex);
+
 	if (hist_data->attrs->name)
 		named_data = find_named_trigger(hist_data->attrs->name);
 
-	list_for_each_entry_rcu(test, &file->triggers, list) {
+	list_for_each_entry(test, &file->triggers, list) {
 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
 			if (hist_trigger_match(data, test, named_data, false)) {
 				match = true;
@@ -6026,10 +6060,12 @@ static bool hist_trigger_check_refs(struct event_trigger_data *data,
 	struct hist_trigger_data *hist_data = data->private_data;
 	struct event_trigger_data *test, *named_data = NULL;
 
+	lockdep_assert_held(&event_mutex);
+
 	if (hist_data->attrs->name)
 		named_data = find_named_trigger(hist_data->attrs->name);
 
-	list_for_each_entry_rcu(test, &file->triggers, list) {
+	list_for_each_entry(test, &file->triggers, list) {
 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
 			if (!hist_trigger_match(data, test, named_data, false))
 				continue;
@@ -6051,10 +6087,12 @@ static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
 	struct event_trigger_data *test, *named_data = NULL;
 	bool unregistered = false;
 
+	lockdep_assert_held(&event_mutex);
+
 	if (hist_data->attrs->name)
 		named_data = find_named_trigger(hist_data->attrs->name);
 
-	list_for_each_entry_rcu(test, &file->triggers, list) {
+	list_for_each_entry(test, &file->triggers, list) {
 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
 			if (!hist_trigger_match(data, test, named_data, false))
 				continue;
@@ -6080,7 +6118,9 @@ static bool hist_file_check_refs(struct trace_event_file *file)
 	struct hist_trigger_data *hist_data;
 	struct event_trigger_data *test;
 
-	list_for_each_entry_rcu(test, &file->triggers, list) {
+	lockdep_assert_held(&event_mutex);
+
+	list_for_each_entry(test, &file->triggers, list) {
 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
 			hist_data = test->private_data;
 			if (check_var_refs(hist_data))
@@ -6323,7 +6363,8 @@ hist_enable_trigger(struct event_trigger_data *data, void *rec,
 	struct enable_trigger_data *enable_data = data->private_data;
 	struct event_trigger_data *test;
 
-	list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
+	list_for_each_entry_rcu(test, &enable_data->file->triggers, list,
+				lockdep_is_held(&event_mutex)) {
 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
 			if (enable_data->enable)
 				test->paused = false;
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -501,7 +501,9 @@ void update_cond_flag(struct trace_event_file *file)
 	struct event_trigger_data *data;
 	bool set_cond = false;
 
-	list_for_each_entry_rcu(data, &file->triggers, list) {
+	lockdep_assert_held(&event_mutex);
+
+	list_for_each_entry(data, &file->triggers, list) {
 		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
 		    event_command_needs_rec(data->cmd_ops)) {
 			set_cond = true;
@@ -536,7 +538,9 @@ static int register_trigger(char *glob, struct event_trigger_ops *ops,
 	struct event_trigger_data *test;
 	int ret = 0;
 
-	list_for_each_entry_rcu(test, &file->triggers, list) {
+	lockdep_assert_held(&event_mutex);
+
+	list_for_each_entry(test, &file->triggers, list) {
 		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
 			ret = -EEXIST;
 			goto out;
@@ -581,7 +585,9 @@ static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
 	struct event_trigger_data *data;
 	bool unregistered = false;
 
-	list_for_each_entry_rcu(data, &file->triggers, list) {
+	lockdep_assert_held(&event_mutex);
+
+	list_for_each_entry(data, &file->triggers, list) {
 		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
 			unregistered = true;
 			list_del_rcu(&data->list);
@@ -1497,7 +1503,9 @@ int event_enable_register_trigger(char *glob,
 	struct event_trigger_data *test;
 	int ret = 0;
 
-	list_for_each_entry_rcu(test, &file->triggers, list) {
+	lockdep_assert_held(&event_mutex);
+
+	list_for_each_entry(test, &file->triggers, list) {
 		test_enable_data = test->private_data;
 		if (test_enable_data &&
 		    (test->cmd_ops->trigger_type ==
@@ -1537,7 +1545,9 @@ void event_enable_unregister_trigger(char *glob,
 	struct event_trigger_data *data;
 	bool unregistered = false;
 
-	list_for_each_entry_rcu(data, &file->triggers, list) {
+	lockdep_assert_held(&event_mutex);
+
+	list_for_each_entry(data, &file->triggers, list) {
 		enable_data = data->private_data;
 		if (enable_data &&
 		    (data->cmd_ops->trigger_type ==
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -290,7 +290,7 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
 	INIT_HLIST_NODE(&tk->rp.kp.hlist);
 	INIT_LIST_HEAD(&tk->rp.kp.list);
 
-	ret = trace_probe_init(&tk->tp, event, group);
+	ret = trace_probe_init(&tk->tp, event, group, false);
 	if (ret < 0)
 		goto error;
 
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -984,15 +984,19 @@ void trace_probe_cleanup(struct trace_probe *tp)
 }
 
 int trace_probe_init(struct trace_probe *tp, const char *event,
-		     const char *group)
+		     const char *group, bool alloc_filter)
 {
 	struct trace_event_call *call;
+	size_t size = sizeof(struct trace_probe_event);
 	int ret = 0;
 
 	if (!event || !group)
 		return -EINVAL;
 
-	tp->event = kzalloc(sizeof(struct trace_probe_event), GFP_KERNEL);
+	if (alloc_filter)
+		size += sizeof(struct trace_uprobe_filter);
+
+	tp->event = kzalloc(size, GFP_KERNEL);
 	if (!tp->event)
 		return -ENOMEM;
 
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -223,6 +223,12 @@ struct probe_arg {
 	const struct fetch_type	*type;	/* Type of this argument */
 };
 
+struct trace_uprobe_filter {
+	rwlock_t		rwlock;
+	int			nr_systemwide;
+	struct list_head	perf_events;
+};
+
 /* Event call and class holder */
 struct trace_probe_event {
 	unsigned int			flags;	/* For TP_FLAG_* */
@@ -230,6 +236,7 @@ struct trace_probe_event {
 	struct trace_event_call		call;
 	struct list_head		files;
 	struct list_head		probes;
+	struct trace_uprobe_filter	filter[0];
 };
 
 struct trace_probe {
@@ -322,7 +329,7 @@ static inline bool trace_probe_has_single_file(struct trace_probe *tp)
 }
 
 int trace_probe_init(struct trace_probe *tp, const char *event,
-		     const char *group);
+		     const char *group, bool alloc_filter);
void trace_probe_cleanup(struct trace_probe *tp);
 int trace_probe_append(struct trace_probe *tp, struct trace_probe *to);
 void trace_probe_unlink(struct trace_probe *tp);
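The filter[0] member and the alloc_filter flag above implement the classic trailing-array allocation: one kzalloc() sized for the struct plus an optional tail, with the tail placed (and aligned) directly after the struct, which is what the "alignment safe" commit title appears to refer to. A minimal standalone model with made-up names (the kernel uses the older filter[0] spelling; C99 writes it filter[]):

    #include <stdlib.h>

    struct filter {
    	int nr_systemwide;
    };

    struct probe_event {
    	unsigned int flags;
    	struct filter filter[];	/* no size of its own; tail storage */
    };

    static struct probe_event *alloc_event(int with_filter)
    {
    	size_t size = sizeof(struct probe_event);

    	if (with_filter)
    		size += sizeof(struct filter);	/* one allocation for both */
    	/* event->filter now names correctly aligned space right after
    	 * the event itself */
    	return calloc(1, size);
    }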
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -34,12 +34,6 @@ struct uprobe_trace_entry_head {
 #define DATAOF_TRACE_ENTRY(entry, is_return)		\
 	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
 
-struct trace_uprobe_filter {
-	rwlock_t		rwlock;
-	int			nr_systemwide;
-	struct list_head	perf_events;
-};
-
 static int trace_uprobe_create(int argc, const char **argv);
 static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
 static int trace_uprobe_release(struct dyn_event *ev);
@@ -60,7 +54,6 @@ static struct dyn_event_operations trace_uprobe_ops = {
  */
 struct trace_uprobe {
 	struct dyn_event		devent;
-	struct trace_uprobe_filter	filter;
 	struct uprobe_consumer		consumer;
 	struct path			path;
 	struct inode			*inode;
@@ -351,7 +344,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
 	if (!tu)
 		return ERR_PTR(-ENOMEM);
 
-	ret = trace_probe_init(&tu->tp, event, group);
+	ret = trace_probe_init(&tu->tp, event, group, true);
 	if (ret < 0)
 		goto error;
 
|
|||||||
tu->consumer.handler = uprobe_dispatcher;
|
tu->consumer.handler = uprobe_dispatcher;
|
||||||
if (is_ret)
|
if (is_ret)
|
||||||
tu->consumer.ret_handler = uretprobe_dispatcher;
|
tu->consumer.ret_handler = uretprobe_dispatcher;
|
||||||
init_trace_uprobe_filter(&tu->filter);
|
init_trace_uprobe_filter(tu->tp.event->filter);
|
||||||
return tu;
|
return tu;
|
||||||
|
|
||||||
error:
|
error:
|
||||||
@@ -1067,13 +1060,14 @@ static void __probe_event_disable(struct trace_probe *tp)
 	struct trace_probe *pos;
 	struct trace_uprobe *tu;
 
+	tu = container_of(tp, struct trace_uprobe, tp);
+	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
+
 	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
 		tu = container_of(pos, struct trace_uprobe, tp);
 		if (!tu->inode)
 			continue;
 
-		WARN_ON(!uprobe_filter_is_empty(&tu->filter));
-
 		uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
 		tu->inode = NULL;
 	}
@@ -1108,7 +1102,7 @@ static int probe_event_enable(struct trace_event_call *call,
 	}
 
 	tu = container_of(tp, struct trace_uprobe, tp);
-	WARN_ON(!uprobe_filter_is_empty(&tu->filter));
+	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
 
 	if (enabled)
 		return 0;
@@ -1205,39 +1199,39 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
 }
 
 static inline bool
-uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
+trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
+			  struct perf_event *event)
 {
-	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
+	return __uprobe_perf_filter(filter, event->hw.target->mm);
 }
 
-static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
+static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
+				       struct perf_event *event)
 {
 	bool done;
 
-	write_lock(&tu->filter.rwlock);
+	write_lock(&filter->rwlock);
 	if (event->hw.target) {
 		list_del(&event->hw.tp_list);
-		done = tu->filter.nr_systemwide ||
+		done = filter->nr_systemwide ||
 			(event->hw.target->flags & PF_EXITING) ||
-			uprobe_filter_event(tu, event);
+			trace_uprobe_filter_event(filter, event);
 	} else {
-		tu->filter.nr_systemwide--;
-		done = tu->filter.nr_systemwide;
+		filter->nr_systemwide--;
+		done = filter->nr_systemwide;
 	}
-	write_unlock(&tu->filter.rwlock);
+	write_unlock(&filter->rwlock);
 
-	if (!done)
-		return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
-
-	return 0;
+	return done;
 }
 
-static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
+/* This returns true if the filter always covers target mm */
+static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
+				    struct perf_event *event)
 {
 	bool done;
-	int err;
 
-	write_lock(&tu->filter.rwlock);
+	write_lock(&filter->rwlock);
 	if (event->hw.target) {
 		/*
 		 * event->parent != NULL means copy_process(), we can avoid
@@ -1247,28 +1241,21 @@ static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
 		 * attr.enable_on_exec means that exec/mmap will install the
 		 * breakpoints we need.
 		 */
-		done = tu->filter.nr_systemwide ||
+		done = filter->nr_systemwide ||
 			event->parent || event->attr.enable_on_exec ||
-			uprobe_filter_event(tu, event);
-		list_add(&event->hw.tp_list, &tu->filter.perf_events);
+			trace_uprobe_filter_event(filter, event);
+		list_add(&event->hw.tp_list, &filter->perf_events);
 	} else {
-		done = tu->filter.nr_systemwide;
-		tu->filter.nr_systemwide++;
+		done = filter->nr_systemwide;
+		filter->nr_systemwide++;
 	}
-	write_unlock(&tu->filter.rwlock);
+	write_unlock(&filter->rwlock);
 
-	err = 0;
-	if (!done) {
-		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
-		if (err)
-			uprobe_perf_close(tu, event);
-	}
-	return err;
+	return done;
 }
 
-static int uprobe_perf_multi_call(struct trace_event_call *call,
-				  struct perf_event *event,
-		int (*op)(struct trace_uprobe *tu, struct perf_event *event))
+static int uprobe_perf_close(struct trace_event_call *call,
+			     struct perf_event *event)
 {
 	struct trace_probe *pos, *tp;
 	struct trace_uprobe *tu;
@@ -1278,25 +1265,59 @@ static int uprobe_perf_multi_call(struct trace_event_call *call,
 	if (WARN_ON_ONCE(!tp))
 		return -ENODEV;
 
+	tu = container_of(tp, struct trace_uprobe, tp);
+	if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
+		return 0;
+
 	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
 		tu = container_of(pos, struct trace_uprobe, tp);
-		ret = op(tu, event);
+		ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
 		if (ret)
 			break;
 	}
 
 	return ret;
 }
 
+static int uprobe_perf_open(struct trace_event_call *call,
+			    struct perf_event *event)
+{
+	struct trace_probe *pos, *tp;
+	struct trace_uprobe *tu;
+	int err = 0;
+
+	tp = trace_probe_primary_from_call(call);
+	if (WARN_ON_ONCE(!tp))
+		return -ENODEV;
+
+	tu = container_of(tp, struct trace_uprobe, tp);
+	if (trace_uprobe_filter_add(tu->tp.event->filter, event))
+		return 0;
+
+	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
+		if (err) {
+			uprobe_perf_close(call, event);
+			break;
+		}
+	}
+
+	return err;
+}
+
 static bool uprobe_perf_filter(struct uprobe_consumer *uc,
 			       enum uprobe_filter_ctx ctx, struct mm_struct *mm)
 {
+	struct trace_uprobe_filter *filter;
 	struct trace_uprobe *tu;
 	int ret;
 
 	tu = container_of(uc, struct trace_uprobe, consumer);
-	read_lock(&tu->filter.rwlock);
-	ret = __uprobe_perf_filter(&tu->filter, mm);
-	read_unlock(&tu->filter.rwlock);
+	filter = tu->tp.event->filter;
+
+	read_lock(&filter->rwlock);
+	ret = __uprobe_perf_filter(filter, mm);
+	read_unlock(&filter->rwlock);
 
 	return ret;
 }
@@ -1419,10 +1440,10 @@ trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
 		return 0;
 
 	case TRACE_REG_PERF_OPEN:
-		return uprobe_perf_multi_call(event, data, uprobe_perf_open);
+		return uprobe_perf_open(event, data);
 
 	case TRACE_REG_PERF_CLOSE:
-		return uprobe_perf_multi_call(event, data, uprobe_perf_close);
+		return uprobe_perf_close(event, data);
 
 #endif
 	default:
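Taken together, the trace_uprobe.c hunks change the perf open/close contract: trace_uprobe_filter_add()/_remove() now only update the single filter shared by all sibling probes of an event and report whether breakpoint state is already correct, while uprobe_perf_open()/close() walk the probe list exactly once. This is what stops a perf_event from being linked into more than one filter list on multiprobe uprobes. A standalone model of that flow, with made-up names:

    #include <stdbool.h>
    #include <stdio.h>

    struct filter { int nr_systemwide; };	/* shared by all probes of an event */

    /* Returns true when the event's breakpoints already cover everything,
     * so the caller can skip re-applying them. */
    static bool filter_add(struct filter *f)
    {
    	bool covered = f->nr_systemwide > 0;
    	f->nr_systemwide++;
    	return covered;
    }

    static void event_open(struct filter *shared, int nprobes)
    {
    	if (filter_add(shared))
    		return;				/* already applied once */
    	for (int i = 0; i < nprobes; i++)
    		printf("apply probe %d\n", i);	/* stands in for uprobe_apply() */
    }

    int main(void)
    {
    	struct filter f = { 0 };
    	event_open(&f, 3);	/* applies all three probes */
    	event_open(&f, 3);	/* second opener: bookkeeping only */
    	return 0;
    }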