2018-08-16 15:23:53 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2012-04-11 10:30:43 +00:00
|
|
|
/*
|
|
|
|
* uprobes-based tracing events
|
|
|
|
*
|
|
|
|
* Copyright (C) IBM Corporation, 2010-2012
|
|
|
|
* Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
|
|
|
|
*/
|
2017-02-07 11:21:28 +00:00
|
|
|
/* Log prefix for this file; this is the uprobe (not kprobe) tracer. */
#define pr_fmt(fmt)	"trace_uprobe: " fmt
|
2012-04-11 10:30:43 +00:00
|
|
|
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/uaccess.h>
|
|
|
|
#include <linux/uprobes.h>
|
|
|
|
#include <linux/namei.h>
|
2012-12-18 00:01:27 +00:00
|
|
|
#include <linux/string.h>
|
2017-02-04 00:27:20 +00:00
|
|
|
#include <linux/rculist.h>
|
2012-04-11 10:30:43 +00:00
|
|
|
|
|
|
|
#include "trace_probe.h"
|
|
|
|
|
|
|
|
#define UPROBE_EVENT_SYSTEM "uprobes"
|
|
|
|
|
2013-03-29 17:26:51 +00:00
|
|
|
struct uprobe_trace_entry_head {
|
|
|
|
struct trace_entry ent;
|
|
|
|
unsigned long vaddr[];
|
|
|
|
};
|
|
|
|
|
|
|
|
#define SIZEOF_TRACE_ENTRY(is_return) \
|
|
|
|
(sizeof(struct uprobe_trace_entry_head) + \
|
|
|
|
sizeof(unsigned long) * (is_return ? 2 : 1))
|
|
|
|
|
|
|
|
#define DATAOF_TRACE_ENTRY(entry, is_return) \
|
|
|
|
((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
|
|
|
|
|
2013-02-03 19:58:35 +00:00
|
|
|
struct trace_uprobe_filter {
|
|
|
|
rwlock_t rwlock;
|
|
|
|
int nr_systemwide;
|
|
|
|
struct list_head perf_events;
|
|
|
|
};
|
|
|
|
|
2012-04-11 10:30:43 +00:00
|
|
|
/*
|
|
|
|
* uprobe event core functions
|
|
|
|
*/
|
|
|
|
struct trace_uprobe {
|
|
|
|
struct list_head list;
|
2013-02-03 19:58:35 +00:00
|
|
|
struct trace_uprobe_filter filter;
|
2013-01-31 18:47:23 +00:00
|
|
|
struct uprobe_consumer consumer;
|
2018-04-23 17:21:34 +00:00
|
|
|
struct path path;
|
2012-04-11 10:30:43 +00:00
|
|
|
struct inode *inode;
|
|
|
|
char *filename;
|
|
|
|
unsigned long offset;
|
|
|
|
unsigned long nhit;
|
2013-07-03 06:42:53 +00:00
|
|
|
struct trace_probe tp;
|
2012-04-11 10:30:43 +00:00
|
|
|
};
|
|
|
|
|
2013-07-03 06:42:53 +00:00
|
|
|
#define SIZEOF_TRACE_UPROBE(n) \
|
|
|
|
(offsetof(struct trace_uprobe, tp.args) + \
|
2012-04-11 10:30:43 +00:00
|
|
|
(sizeof(struct probe_arg) * (n)))
|
|
|
|
|
|
|
|
static int register_uprobe_event(struct trace_uprobe *tu);
|
2013-07-04 03:33:51 +00:00
|
|
|
static int unregister_uprobe_event(struct trace_uprobe *tu);
|
2012-04-11 10:30:43 +00:00
|
|
|
|
|
|
|
static DEFINE_MUTEX(uprobe_lock);
|
|
|
|
static LIST_HEAD(uprobe_list);
|
|
|
|
|
2013-11-25 04:42:47 +00:00
|
|
|
struct uprobe_dispatch_data {
|
|
|
|
struct trace_uprobe *tu;
|
|
|
|
unsigned long bp_addr;
|
|
|
|
};
|
|
|
|
|
2012-04-11 10:30:43 +00:00
|
|
|
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
|
2013-03-30 17:25:23 +00:00
|
|
|
static int uretprobe_dispatcher(struct uprobe_consumer *con,
|
|
|
|
unsigned long func, struct pt_regs *regs);
|
2012-04-11 10:30:43 +00:00
|
|
|
|
2013-11-26 06:21:04 +00:00
|
|
|
/*
 * Return the address of the n-th word on the user stack relative to
 * @addr.  On architectures whose stack grows upward the n-th entry
 * lives below @addr; everywhere else it lives above it.
 */
#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	unsigned long off = (unsigned long)n * sizeof(long);

	return addr - off;
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	unsigned long off = (unsigned long)n * sizeof(long);

	return addr + off;
}
#endif
|
|
|
|
|
|
|
|
static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
|
|
|
|
{
|
|
|
|
unsigned long ret;
|
|
|
|
unsigned long addr = user_stack_pointer(regs);
|
|
|
|
|
|
|
|
addr = adjust_stack_addr(addr, n);
|
|
|
|
|
|
|
|
if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Uprobes-specific fetch functions
|
|
|
|
*/
|
|
|
|
#define DEFINE_FETCH_stack(type) \
|
2014-04-17 08:18:00 +00:00
|
|
|
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs, \
|
|
|
|
void *offset, void *dest) \
|
2013-11-26 06:21:04 +00:00
|
|
|
{ \
|
|
|
|
*(type *)dest = (type)get_user_stack_nth(regs, \
|
|
|
|
((unsigned long)offset)); \
|
|
|
|
}
|
|
|
|
DEFINE_BASIC_FETCH_FUNCS(stack)
|
|
|
|
/* No string on the stack entry */
|
|
|
|
#define fetch_stack_string NULL
|
|
|
|
#define fetch_stack_string_size NULL
|
|
|
|
|
2013-11-26 06:21:04 +00:00
|
|
|
#define DEFINE_FETCH_memory(type) \
|
2014-04-17 08:18:00 +00:00
|
|
|
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs, \
|
|
|
|
void *addr, void *dest) \
|
2013-11-26 06:21:04 +00:00
|
|
|
{ \
|
|
|
|
type retval; \
|
|
|
|
void __user *vaddr = (void __force __user *) addr; \
|
|
|
|
\
|
|
|
|
if (copy_from_user(&retval, vaddr, sizeof(type))) \
|
|
|
|
*(type *)dest = 0; \
|
|
|
|
else \
|
|
|
|
*(type *) dest = retval; \
|
|
|
|
}
|
|
|
|
DEFINE_BASIC_FETCH_FUNCS(memory)
|
|
|
|
/*
|
|
|
|
* Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
|
|
|
|
* length and relative data location.
|
|
|
|
*/
|
2014-04-17 08:18:00 +00:00
|
|
|
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
|
|
|
|
void *addr, void *dest)
|
2013-11-26 06:21:04 +00:00
|
|
|
{
|
|
|
|
long ret;
|
|
|
|
u32 rloc = *(u32 *)dest;
|
|
|
|
int maxlen = get_rloc_len(rloc);
|
|
|
|
u8 *dst = get_rloc_data(dest);
|
|
|
|
void __user *src = (void __force __user *) addr;
|
|
|
|
|
|
|
|
if (!maxlen)
|
|
|
|
return;
|
|
|
|
|
|
|
|
ret = strncpy_from_user(dst, src, maxlen);
|
2018-04-10 12:20:08 +00:00
|
|
|
if (ret == maxlen)
|
|
|
|
dst[--ret] = '\0';
|
2013-11-26 06:21:04 +00:00
|
|
|
|
|
|
|
if (ret < 0) { /* Failed to fetch string */
|
|
|
|
((u8 *)get_rloc_data(dest))[0] = '\0';
|
|
|
|
*(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
|
|
|
|
} else {
|
|
|
|
*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-04-17 08:18:00 +00:00
|
|
|
static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
|
|
|
|
void *addr, void *dest)
|
2013-11-26 06:21:04 +00:00
|
|
|
{
|
|
|
|
int len;
|
|
|
|
void __user *vaddr = (void __force __user *) addr;
|
|
|
|
|
|
|
|
len = strnlen_user(vaddr, MAX_STRING_SIZE);
|
|
|
|
|
|
|
|
if (len == 0 || len > MAX_STRING_SIZE) /* Failed to check length */
|
|
|
|
*(u32 *)dest = 0;
|
|
|
|
else
|
|
|
|
*(u32 *)dest = len;
|
|
|
|
}
|
2013-11-26 06:21:04 +00:00
|
|
|
|
2013-11-25 04:42:47 +00:00
|
|
|
static unsigned long translate_user_vaddr(void *file_offset)
|
|
|
|
{
|
|
|
|
unsigned long base_addr;
|
|
|
|
struct uprobe_dispatch_data *udd;
|
|
|
|
|
|
|
|
udd = (void *) current->utask->vaddr;
|
|
|
|
|
|
|
|
base_addr = udd->bp_addr - udd->tu->offset;
|
|
|
|
return base_addr + (unsigned long)file_offset;
|
|
|
|
}
|
|
|
|
|
|
|
|
#define DEFINE_FETCH_file_offset(type) \
|
2014-04-17 08:18:00 +00:00
|
|
|
static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs, \
|
|
|
|
void *offset, void *dest)\
|
2013-11-25 04:42:47 +00:00
|
|
|
{ \
|
|
|
|
void *vaddr = (void *)translate_user_vaddr(offset); \
|
|
|
|
\
|
|
|
|
FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest); \
|
|
|
|
}
|
|
|
|
DEFINE_BASIC_FETCH_FUNCS(file_offset)
|
|
|
|
DEFINE_FETCH_file_offset(string)
|
|
|
|
DEFINE_FETCH_file_offset(string_size)
|
|
|
|
|
2013-11-26 05:56:28 +00:00
|
|
|
/* Fetch type information table */
|
2015-03-12 05:58:34 +00:00
|
|
|
static const struct fetch_type uprobes_fetch_type_table[] = {
|
2013-11-26 05:56:28 +00:00
|
|
|
/* Special types */
|
|
|
|
[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
|
|
|
|
sizeof(u32), 1, "__data_loc char[]"),
|
|
|
|
[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
|
|
|
|
string_size, sizeof(u32), 0, "u32"),
|
|
|
|
/* Basic types */
|
|
|
|
ASSIGN_FETCH_TYPE(u8, u8, 0),
|
|
|
|
ASSIGN_FETCH_TYPE(u16, u16, 0),
|
|
|
|
ASSIGN_FETCH_TYPE(u32, u32, 0),
|
|
|
|
ASSIGN_FETCH_TYPE(u64, u64, 0),
|
|
|
|
ASSIGN_FETCH_TYPE(s8, u8, 1),
|
|
|
|
ASSIGN_FETCH_TYPE(s16, u16, 1),
|
|
|
|
ASSIGN_FETCH_TYPE(s32, u32, 1),
|
|
|
|
ASSIGN_FETCH_TYPE(s64, u64, 1),
|
2016-08-18 08:57:50 +00:00
|
|
|
ASSIGN_FETCH_TYPE_ALIAS(x8, u8, u8, 0),
|
|
|
|
ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
|
|
|
|
ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
|
|
|
|
ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),
|
2013-11-26 05:56:28 +00:00
|
|
|
|
|
|
|
ASSIGN_FETCH_TYPE_END
|
|
|
|
};
|
|
|
|
|
2013-02-03 19:58:35 +00:00
|
|
|
static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
|
|
|
|
{
|
|
|
|
rwlock_init(&filter->rwlock);
|
|
|
|
filter->nr_systemwide = 0;
|
|
|
|
INIT_LIST_HEAD(&filter->perf_events);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
|
|
|
|
{
|
|
|
|
return !filter->nr_systemwide && list_empty(&filter->perf_events);
|
|
|
|
}
|
|
|
|
|
2013-03-30 17:25:23 +00:00
|
|
|
static inline bool is_ret_probe(struct trace_uprobe *tu)
|
|
|
|
{
|
|
|
|
return tu->consumer.ret_handler != NULL;
|
|
|
|
}
|
|
|
|
|
2012-04-11 10:30:43 +00:00
|
|
|
/*
|
|
|
|
* Allocate new trace_uprobe and initialize it (including uprobes).
|
|
|
|
*/
|
|
|
|
static struct trace_uprobe *
|
2013-03-30 17:25:23 +00:00
|
|
|
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
|
2012-04-11 10:30:43 +00:00
|
|
|
{
|
|
|
|
struct trace_uprobe *tu;
|
|
|
|
|
|
|
|
if (!event || !is_good_name(event))
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
|
|
|
if (!group || !is_good_name(group))
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
|
|
|
tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
|
|
|
|
if (!tu)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
2013-07-03 06:42:53 +00:00
|
|
|
tu->tp.call.class = &tu->tp.class;
|
|
|
|
tu->tp.call.name = kstrdup(event, GFP_KERNEL);
|
|
|
|
if (!tu->tp.call.name)
|
2012-04-11 10:30:43 +00:00
|
|
|
goto error;
|
|
|
|
|
2013-07-03 06:42:53 +00:00
|
|
|
tu->tp.class.system = kstrdup(group, GFP_KERNEL);
|
|
|
|
if (!tu->tp.class.system)
|
2012-04-11 10:30:43 +00:00
|
|
|
goto error;
|
|
|
|
|
|
|
|
INIT_LIST_HEAD(&tu->list);
|
2014-01-17 08:08:38 +00:00
|
|
|
INIT_LIST_HEAD(&tu->tp.files);
|
2013-01-31 18:47:23 +00:00
|
|
|
tu->consumer.handler = uprobe_dispatcher;
|
2013-03-30 17:25:23 +00:00
|
|
|
if (is_ret)
|
|
|
|
tu->consumer.ret_handler = uretprobe_dispatcher;
|
2013-02-03 19:58:35 +00:00
|
|
|
init_trace_uprobe_filter(&tu->filter);
|
2012-04-11 10:30:43 +00:00
|
|
|
return tu;
|
|
|
|
|
|
|
|
error:
|
2013-07-03 06:42:53 +00:00
|
|
|
kfree(tu->tp.call.name);
|
2012-04-11 10:30:43 +00:00
|
|
|
kfree(tu);
|
|
|
|
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void free_trace_uprobe(struct trace_uprobe *tu)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2013-07-03 06:42:53 +00:00
|
|
|
for (i = 0; i < tu->tp.nr_args; i++)
|
|
|
|
traceprobe_free_probe_arg(&tu->tp.args[i]);
|
2012-04-11 10:30:43 +00:00
|
|
|
|
2018-04-23 17:21:34 +00:00
|
|
|
path_put(&tu->path);
|
2013-07-03 06:42:53 +00:00
|
|
|
kfree(tu->tp.call.class->system);
|
|
|
|
kfree(tu->tp.call.name);
|
2012-04-11 10:30:43 +00:00
|
|
|
kfree(tu->filename);
|
|
|
|
kfree(tu);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct trace_uprobe *find_probe_event(const char *event, const char *group)
|
|
|
|
{
|
|
|
|
struct trace_uprobe *tu;
|
|
|
|
|
|
|
|
list_for_each_entry(tu, &uprobe_list, list)
|
2015-05-13 18:20:14 +00:00
|
|
|
if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
|
2013-07-03 06:42:53 +00:00
|
|
|
strcmp(tu->tp.call.class->system, group) == 0)
|
2012-04-11 10:30:43 +00:00
|
|
|
return tu;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */
|
2013-07-04 03:33:51 +00:00
|
|
|
static int unregister_trace_uprobe(struct trace_uprobe *tu)
|
2012-04-11 10:30:43 +00:00
|
|
|
{
|
2013-07-04 03:33:51 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = unregister_uprobe_event(tu);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2012-04-11 10:30:43 +00:00
|
|
|
list_del(&tu->list);
|
|
|
|
free_trace_uprobe(tu);
|
2013-07-04 03:33:51 +00:00
|
|
|
return 0;
|
2012-04-11 10:30:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Register a trace_uprobe and probe_event */
|
|
|
|
static int register_trace_uprobe(struct trace_uprobe *tu)
|
|
|
|
{
|
2013-07-03 06:42:53 +00:00
|
|
|
struct trace_uprobe *old_tu;
|
2012-04-11 10:30:43 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
mutex_lock(&uprobe_lock);
|
|
|
|
|
|
|
|
/* register as an event */
|
2015-05-13 18:20:14 +00:00
|
|
|
old_tu = find_probe_event(trace_event_name(&tu->tp.call),
|
2014-04-08 21:26:21 +00:00
|
|
|
tu->tp.call.class->system);
|
2013-07-03 06:42:53 +00:00
|
|
|
if (old_tu) {
|
2012-04-11 10:30:43 +00:00
|
|
|
/* delete old event */
|
2013-07-03 06:42:53 +00:00
|
|
|
ret = unregister_trace_uprobe(old_tu);
|
2013-07-04 03:33:51 +00:00
|
|
|
if (ret)
|
|
|
|
goto end;
|
|
|
|
}
|
2012-04-11 10:30:43 +00:00
|
|
|
|
|
|
|
ret = register_uprobe_event(tu);
|
|
|
|
if (ret) {
|
2016-03-22 21:28:09 +00:00
|
|
|
pr_warn("Failed to register probe event(%d)\n", ret);
|
2012-04-11 10:30:43 +00:00
|
|
|
goto end;
|
|
|
|
}
|
|
|
|
|
|
|
|
list_add_tail(&tu->list, &uprobe_list);
|
|
|
|
|
|
|
|
end:
|
|
|
|
mutex_unlock(&uprobe_lock);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Argument syntax:
|
2013-07-03 07:44:46 +00:00
|
|
|
* - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
|
2012-04-11 10:30:43 +00:00
|
|
|
*
|
|
|
|
* - Remove uprobe: -:[GRP/]EVENT
|
|
|
|
*/
|
|
|
|
static int create_trace_uprobe(int argc, char **argv)
|
|
|
|
{
|
|
|
|
struct trace_uprobe *tu;
|
|
|
|
char *arg, *event, *group, *filename;
|
|
|
|
char buf[MAX_EVENT_NAME_LEN];
|
|
|
|
struct path path;
|
|
|
|
unsigned long offset;
|
2013-03-30 19:28:15 +00:00
|
|
|
bool is_delete, is_return;
|
2012-04-11 10:30:43 +00:00
|
|
|
int i, ret;
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
is_delete = false;
|
2013-03-30 19:28:15 +00:00
|
|
|
is_return = false;
|
2012-04-11 10:30:43 +00:00
|
|
|
event = NULL;
|
|
|
|
group = NULL;
|
|
|
|
|
|
|
|
/* argc must be >= 1 */
|
|
|
|
if (argv[0][0] == '-')
|
|
|
|
is_delete = true;
|
2013-03-30 19:28:15 +00:00
|
|
|
else if (argv[0][0] == 'r')
|
|
|
|
is_return = true;
|
2012-04-11 10:30:43 +00:00
|
|
|
else if (argv[0][0] != 'p') {
|
2013-03-30 19:28:15 +00:00
|
|
|
pr_info("Probe definition must be started with 'p', 'r' or '-'.\n");
|
2012-04-11 10:30:43 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (argv[0][1] == ':') {
|
|
|
|
event = &argv[0][2];
|
|
|
|
arg = strchr(event, '/');
|
|
|
|
|
|
|
|
if (arg) {
|
|
|
|
group = event;
|
|
|
|
event = arg + 1;
|
|
|
|
event[-1] = '\0';
|
|
|
|
|
|
|
|
if (strlen(group) == 0) {
|
|
|
|
pr_info("Group name is not specified\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (strlen(event) == 0) {
|
|
|
|
pr_info("Event name is not specified\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!group)
|
|
|
|
group = UPROBE_EVENT_SYSTEM;
|
|
|
|
|
|
|
|
if (is_delete) {
|
2013-07-04 03:33:51 +00:00
|
|
|
int ret;
|
|
|
|
|
2012-04-11 10:30:43 +00:00
|
|
|
if (!event) {
|
|
|
|
pr_info("Delete command needs an event name.\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
mutex_lock(&uprobe_lock);
|
|
|
|
tu = find_probe_event(event, group);
|
|
|
|
|
|
|
|
if (!tu) {
|
|
|
|
mutex_unlock(&uprobe_lock);
|
|
|
|
pr_info("Event %s/%s doesn't exist.\n", group, event);
|
|
|
|
return -ENOENT;
|
|
|
|
}
|
|
|
|
/* delete an event */
|
2013-07-04 03:33:51 +00:00
|
|
|
ret = unregister_trace_uprobe(tu);
|
2012-04-11 10:30:43 +00:00
|
|
|
mutex_unlock(&uprobe_lock);
|
2013-07-04 03:33:51 +00:00
|
|
|
return ret;
|
2012-04-11 10:30:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (argc < 2) {
|
|
|
|
pr_info("Probe point is not specified.\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2017-01-13 16:58:34 +00:00
|
|
|
/* Find the last occurrence, in case the path contains ':' too. */
|
|
|
|
arg = strrchr(argv[1], ':');
|
2018-04-23 17:21:34 +00:00
|
|
|
if (!arg)
|
|
|
|
return -EINVAL;
|
2012-04-11 10:30:43 +00:00
|
|
|
|
|
|
|
*arg++ = '\0';
|
|
|
|
filename = argv[1];
|
|
|
|
ret = kern_path(filename, LOOKUP_FOLLOW, &path);
|
|
|
|
if (ret)
|
2018-04-23 17:21:34 +00:00
|
|
|
return ret;
|
2013-01-27 17:20:45 +00:00
|
|
|
|
2018-04-23 17:21:34 +00:00
|
|
|
if (!d_is_reg(path.dentry)) {
|
2012-07-18 10:16:44 +00:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto fail_address_parse;
|
|
|
|
}
|
2012-04-11 10:30:43 +00:00
|
|
|
|
2013-01-27 17:20:45 +00:00
|
|
|
ret = kstrtoul(arg, 0, &offset);
|
|
|
|
if (ret)
|
|
|
|
goto fail_address_parse;
|
|
|
|
|
2012-04-11 10:30:43 +00:00
|
|
|
argc -= 2;
|
|
|
|
argv += 2;
|
|
|
|
|
|
|
|
/* setup a probe */
|
|
|
|
if (!event) {
|
2012-12-18 00:01:27 +00:00
|
|
|
char *tail;
|
2012-04-11 10:30:43 +00:00
|
|
|
char *ptr;
|
|
|
|
|
2012-12-18 00:01:27 +00:00
|
|
|
tail = kstrdup(kbasename(filename), GFP_KERNEL);
|
|
|
|
if (!tail) {
|
2012-04-11 10:30:43 +00:00
|
|
|
ret = -ENOMEM;
|
|
|
|
goto fail_address_parse;
|
|
|
|
}
|
|
|
|
|
|
|
|
ptr = strpbrk(tail, ".-_");
|
|
|
|
if (ptr)
|
|
|
|
*ptr = '\0';
|
|
|
|
|
|
|
|
snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
|
|
|
|
event = buf;
|
|
|
|
kfree(tail);
|
|
|
|
}
|
|
|
|
|
2013-03-30 19:28:15 +00:00
|
|
|
tu = alloc_trace_uprobe(group, event, argc, is_return);
|
2012-04-11 10:30:43 +00:00
|
|
|
if (IS_ERR(tu)) {
|
|
|
|
pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
|
|
|
|
ret = PTR_ERR(tu);
|
|
|
|
goto fail_address_parse;
|
|
|
|
}
|
|
|
|
tu->offset = offset;
|
2018-04-23 17:21:34 +00:00
|
|
|
tu->path = path;
|
2012-04-11 10:30:43 +00:00
|
|
|
tu->filename = kstrdup(filename, GFP_KERNEL);
|
|
|
|
|
|
|
|
if (!tu->filename) {
|
|
|
|
pr_info("Failed to allocate filename.\n");
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* parse arguments */
|
|
|
|
ret = 0;
|
|
|
|
for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
|
2013-07-03 06:42:53 +00:00
|
|
|
struct probe_arg *parg = &tu->tp.args[i];
|
|
|
|
|
2012-04-11 10:30:43 +00:00
|
|
|
/* Increment count for freeing args in error case */
|
2013-07-03 06:42:53 +00:00
|
|
|
tu->tp.nr_args++;
|
2012-04-11 10:30:43 +00:00
|
|
|
|
|
|
|
/* Parse argument name */
|
|
|
|
arg = strchr(argv[i], '=');
|
|
|
|
if (arg) {
|
|
|
|
*arg++ = '\0';
|
2013-07-03 06:42:53 +00:00
|
|
|
parg->name = kstrdup(argv[i], GFP_KERNEL);
|
2012-04-11 10:30:43 +00:00
|
|
|
} else {
|
|
|
|
arg = argv[i];
|
|
|
|
/* If argument name is omitted, set "argN" */
|
|
|
|
snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
|
2013-07-03 06:42:53 +00:00
|
|
|
parg->name = kstrdup(buf, GFP_KERNEL);
|
2012-04-11 10:30:43 +00:00
|
|
|
}
|
|
|
|
|
2013-07-03 06:42:53 +00:00
|
|
|
if (!parg->name) {
|
2012-04-11 10:30:43 +00:00
|
|
|
pr_info("Failed to allocate argument[%d] name.\n", i);
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2013-07-03 06:42:53 +00:00
|
|
|
if (!is_good_name(parg->name)) {
|
|
|
|
pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
|
2012-04-11 10:30:43 +00:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2013-07-03 06:42:53 +00:00
|
|
|
if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
|
2012-04-11 10:30:43 +00:00
|
|
|
pr_info("Argument[%d] name '%s' conflicts with "
|
|
|
|
"another field.\n", i, argv[i]);
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Parse fetch argument */
|
2013-07-03 06:42:53 +00:00
|
|
|
ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
|
2015-03-12 05:58:34 +00:00
|
|
|
is_return, false,
|
|
|
|
uprobes_fetch_type_table);
|
2012-04-11 10:30:43 +00:00
|
|
|
if (ret) {
|
|
|
|
pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = register_trace_uprobe(tu);
|
|
|
|
if (ret)
|
|
|
|
goto error;
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
error:
|
|
|
|
free_trace_uprobe(tu);
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
fail_address_parse:
|
2018-04-23 17:21:34 +00:00
|
|
|
path_put(&path);
|
2012-04-11 10:30:43 +00:00
|
|
|
|
2012-07-18 10:16:44 +00:00
|
|
|
pr_info("Failed to parse address or file.\n");
|
2012-04-11 10:30:43 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2013-07-04 03:33:51 +00:00
|
|
|
static int cleanup_all_probes(void)
|
2012-04-11 10:30:43 +00:00
|
|
|
{
|
|
|
|
struct trace_uprobe *tu;
|
2013-07-04 03:33:51 +00:00
|
|
|
int ret = 0;
|
2012-04-11 10:30:43 +00:00
|
|
|
|
|
|
|
mutex_lock(&uprobe_lock);
|
|
|
|
while (!list_empty(&uprobe_list)) {
|
|
|
|
tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
|
2013-07-04 03:33:51 +00:00
|
|
|
ret = unregister_trace_uprobe(tu);
|
|
|
|
if (ret)
|
|
|
|
break;
|
2012-04-11 10:30:43 +00:00
|
|
|
}
|
|
|
|
mutex_unlock(&uprobe_lock);
|
2013-07-04 03:33:51 +00:00
|
|
|
return ret;
|
2012-04-11 10:30:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Probes listing interfaces */
|
|
|
|
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
|
|
|
|
{
|
|
|
|
mutex_lock(&uprobe_lock);
|
|
|
|
return seq_list_start(&uprobe_list, *pos);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
|
|
|
|
{
|
|
|
|
return seq_list_next(v, &uprobe_list, pos);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void probes_seq_stop(struct seq_file *m, void *v)
|
|
|
|
{
|
|
|
|
mutex_unlock(&uprobe_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int probes_seq_show(struct seq_file *m, void *v)
|
|
|
|
{
|
|
|
|
struct trace_uprobe *tu = v;
|
2013-03-30 18:48:09 +00:00
|
|
|
char c = is_ret_probe(tu) ? 'r' : 'p';
|
2012-04-11 10:30:43 +00:00
|
|
|
int i;
|
|
|
|
|
2018-03-15 08:27:56 +00:00
|
|
|
seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, tu->tp.call.class->system,
|
|
|
|
trace_event_name(&tu->tp.call), tu->filename,
|
|
|
|
(int)(sizeof(void *) * 2), tu->offset);
|
2012-04-11 10:30:43 +00:00
|
|
|
|
2013-07-03 06:42:53 +00:00
|
|
|
for (i = 0; i < tu->tp.nr_args; i++)
|
|
|
|
seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
|
2012-04-11 10:30:43 +00:00
|
|
|
|
2014-11-08 20:42:10 +00:00
|
|
|
seq_putc(m, '\n');
|
2012-04-11 10:30:43 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct seq_operations probes_seq_op = {
|
|
|
|
.start = probes_seq_start,
|
|
|
|
.next = probes_seq_next,
|
|
|
|
.stop = probes_seq_stop,
|
|
|
|
.show = probes_seq_show
|
|
|
|
};
|
|
|
|
|
|
|
|
static int probes_open(struct inode *inode, struct file *file)
|
|
|
|
{
|
2013-07-04 03:33:51 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
|
|
|
|
ret = cleanup_all_probes();
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
2012-04-11 10:30:43 +00:00
|
|
|
|
|
|
|
return seq_open(file, &probes_seq_op);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Parse user-written probe definitions, one command per line. */
static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos, create_trace_uprobe);
}
|
|
|
|
|
|
|
|
static const struct file_operations uprobe_events_ops = {
|
|
|
|
.owner = THIS_MODULE,
|
|
|
|
.open = probes_open,
|
|
|
|
.read = seq_read,
|
|
|
|
.llseek = seq_lseek,
|
|
|
|
.release = seq_release,
|
|
|
|
.write = probes_write,
|
|
|
|
};
|
|
|
|
|
|
|
|
/* Probes profiling interfaces */
|
|
|
|
static int probes_profile_seq_show(struct seq_file *m, void *v)
|
|
|
|
{
|
|
|
|
struct trace_uprobe *tu = v;
|
|
|
|
|
2014-04-08 21:26:21 +00:00
|
|
|
seq_printf(m, " %s %-44s %15lu\n", tu->filename,
|
2015-05-13 18:20:14 +00:00
|
|
|
trace_event_name(&tu->tp.call), tu->nhit);
|
2012-04-11 10:30:43 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct seq_operations profile_seq_op = {
|
|
|
|
.start = probes_seq_start,
|
|
|
|
.next = probes_seq_next,
|
|
|
|
.stop = probes_seq_stop,
|
|
|
|
.show = probes_profile_seq_show
|
|
|
|
};
|
|
|
|
|
|
|
|
static int profile_open(struct inode *inode, struct file *file)
|
|
|
|
{
|
|
|
|
return seq_open(file, &profile_seq_op);
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct file_operations uprobe_profile_ops = {
|
|
|
|
.owner = THIS_MODULE,
|
|
|
|
.open = profile_open,
|
|
|
|
.read = seq_read,
|
|
|
|
.llseek = seq_lseek,
|
|
|
|
.release = seq_release,
|
|
|
|
};
|
|
|
|
|
2013-07-03 07:40:28 +00:00
|
|
|
struct uprobe_cpu_buffer {
|
|
|
|
struct mutex mutex;
|
|
|
|
void *buf;
|
|
|
|
};
|
|
|
|
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
|
|
|
|
static int uprobe_buffer_refcnt;
|
|
|
|
|
|
|
|
static int uprobe_buffer_init(void)
|
|
|
|
{
|
|
|
|
int cpu, err_cpu;
|
|
|
|
|
|
|
|
uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
|
|
|
|
if (uprobe_cpu_buffer == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
for_each_possible_cpu(cpu) {
|
|
|
|
struct page *p = alloc_pages_node(cpu_to_node(cpu),
|
|
|
|
GFP_KERNEL, 0);
|
|
|
|
if (p == NULL) {
|
|
|
|
err_cpu = cpu;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
|
|
|
|
mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err:
|
|
|
|
for_each_possible_cpu(cpu) {
|
|
|
|
if (cpu == err_cpu)
|
|
|
|
break;
|
|
|
|
free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
|
|
|
|
}
|
|
|
|
|
|
|
|
free_percpu(uprobe_cpu_buffer);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int uprobe_buffer_enable(void)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
BUG_ON(!mutex_is_locked(&event_mutex));
|
|
|
|
|
|
|
|
if (uprobe_buffer_refcnt++ == 0) {
|
|
|
|
ret = uprobe_buffer_init();
|
|
|
|
if (ret < 0)
|
|
|
|
uprobe_buffer_refcnt--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void uprobe_buffer_disable(void)
|
|
|
|
{
|
2014-04-17 08:05:19 +00:00
|
|
|
int cpu;
|
|
|
|
|
2013-07-03 07:40:28 +00:00
|
|
|
BUG_ON(!mutex_is_locked(&event_mutex));
|
|
|
|
|
|
|
|
if (--uprobe_buffer_refcnt == 0) {
|
2014-04-17 08:05:19 +00:00
|
|
|
for_each_possible_cpu(cpu)
|
|
|
|
free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
|
|
|
|
cpu)->buf);
|
|
|
|
|
2013-07-03 07:40:28 +00:00
|
|
|
free_percpu(uprobe_cpu_buffer);
|
|
|
|
uprobe_cpu_buffer = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
|
|
|
|
{
|
|
|
|
struct uprobe_cpu_buffer *ucb;
|
|
|
|
int cpu;
|
|
|
|
|
|
|
|
cpu = raw_smp_processor_id();
|
|
|
|
ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Use per-cpu buffers for fastest access, but we might migrate
|
|
|
|
* so the mutex makes sure we have sole access to it.
|
|
|
|
*/
|
|
|
|
mutex_lock(&ucb->mutex);
|
|
|
|
|
|
|
|
return ucb;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
|
|
|
|
{
|
|
|
|
mutex_unlock(&ucb->mutex);
|
|
|
|
}
|
|
|
|
|
2014-01-17 08:08:36 +00:00
|
|
|
static void __uprobe_trace_func(struct trace_uprobe *tu,
|
2014-01-17 08:08:37 +00:00
|
|
|
unsigned long func, struct pt_regs *regs,
|
2014-01-17 08:08:38 +00:00
|
|
|
struct uprobe_cpu_buffer *ucb, int dsize,
|
2015-05-05 14:09:53 +00:00
|
|
|
struct trace_event_file *trace_file)
|
2012-04-11 10:30:43 +00:00
|
|
|
{
|
|
|
|
struct uprobe_trace_entry_head *entry;
|
|
|
|
struct ring_buffer_event *event;
|
|
|
|
struct ring_buffer *buffer;
|
2013-03-29 17:26:51 +00:00
|
|
|
void *data;
|
2014-01-17 08:08:37 +00:00
|
|
|
int size, esize;
|
2015-05-05 15:45:27 +00:00
|
|
|
struct trace_event_call *call = &tu->tp.call;
|
2012-04-11 10:30:43 +00:00
|
|
|
|
2015-05-05 14:09:53 +00:00
|
|
|
WARN_ON(call != trace_file->event_call);
|
2014-01-17 08:08:38 +00:00
|
|
|
|
2014-01-17 08:08:37 +00:00
|
|
|
if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
|
2013-07-03 07:40:28 +00:00
|
|
|
return;
|
|
|
|
|
2015-05-13 19:21:25 +00:00
|
|
|
if (trace_trigger_soft_disabled(trace_file))
|
2014-01-17 08:08:39 +00:00
|
|
|
return;
|
|
|
|
|
2014-01-17 08:08:37 +00:00
|
|
|
esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
|
2013-07-03 07:40:28 +00:00
|
|
|
size = esize + tu->tp.size + dsize;
|
2015-05-05 14:09:53 +00:00
|
|
|
event = trace_event_buffer_lock_reserve(&buffer, trace_file,
|
2014-01-17 08:08:38 +00:00
|
|
|
call->event.type, size, 0, 0);
|
2012-04-11 10:30:43 +00:00
|
|
|
if (!event)
|
2014-01-17 08:08:37 +00:00
|
|
|
return;
|
2012-04-11 10:30:43 +00:00
|
|
|
|
|
|
|
entry = ring_buffer_event_data(event);
|
2013-03-30 17:46:22 +00:00
|
|
|
if (is_ret_probe(tu)) {
|
|
|
|
entry->vaddr[0] = func;
|
|
|
|
entry->vaddr[1] = instruction_pointer(regs);
|
|
|
|
data = DATAOF_TRACE_ENTRY(entry, true);
|
|
|
|
} else {
|
|
|
|
entry->vaddr[0] = instruction_pointer(regs);
|
|
|
|
data = DATAOF_TRACE_ENTRY(entry, false);
|
|
|
|
}
|
|
|
|
|
2013-07-03 07:40:28 +00:00
|
|
|
memcpy(data, ucb->buf, tu->tp.size + dsize);
|
2012-04-11 10:30:43 +00:00
|
|
|
|
2015-05-05 14:09:53 +00:00
|
|
|
event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
|
2013-03-30 17:02:12 +00:00
|
|
|
}
|
2013-02-04 16:48:34 +00:00
|
|
|
|
2013-03-30 17:02:12 +00:00
|
|
|
/* uprobe handler */
|
2014-01-17 08:08:37 +00:00
|
|
|
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
|
|
|
|
struct uprobe_cpu_buffer *ucb, int dsize)
|
2013-03-30 17:02:12 +00:00
|
|
|
{
|
2014-01-17 08:08:38 +00:00
|
|
|
struct event_file_link *link;
|
|
|
|
|
|
|
|
if (is_ret_probe(tu))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
list_for_each_entry_rcu(link, &tu->tp.files, list)
|
|
|
|
__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
2013-02-04 16:48:34 +00:00
|
|
|
return 0;
|
2012-04-11 10:30:43 +00:00
|
|
|
}
|
|
|
|
|
2013-03-30 17:25:23 +00:00
|
|
|
static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
|
2014-01-17 08:08:37 +00:00
|
|
|
struct pt_regs *regs,
|
|
|
|
struct uprobe_cpu_buffer *ucb, int dsize)
|
2013-03-30 17:25:23 +00:00
|
|
|
{
|
2014-01-17 08:08:38 +00:00
|
|
|
struct event_file_link *link;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
list_for_each_entry_rcu(link, &tu->tp.files, list)
|
|
|
|
__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
|
|
|
|
rcu_read_unlock();
|
2013-03-30 17:25:23 +00:00
|
|
|
}
|
|
|
|
|
2012-04-11 10:30:43 +00:00
|
|
|
/* Event entry printers */
|
|
|
|
static enum print_line_t
|
|
|
|
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
|
|
|
|
{
|
2013-03-29 17:26:51 +00:00
|
|
|
struct uprobe_trace_entry_head *entry;
|
2012-04-11 10:30:43 +00:00
|
|
|
struct trace_seq *s = &iter->seq;
|
|
|
|
struct trace_uprobe *tu;
|
|
|
|
u8 *data;
|
|
|
|
int i;
|
|
|
|
|
2013-03-29 17:26:51 +00:00
|
|
|
entry = (struct uprobe_trace_entry_head *)iter->ent;
|
2013-07-03 06:42:53 +00:00
|
|
|
tu = container_of(event, struct trace_uprobe, tp.call.event);
|
2012-04-11 10:30:43 +00:00
|
|
|
|
2013-03-30 18:48:09 +00:00
|
|
|
if (is_ret_probe(tu)) {
|
2014-11-12 22:26:57 +00:00
|
|
|
trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
|
2015-05-13 18:20:14 +00:00
|
|
|
trace_event_name(&tu->tp.call),
|
2014-11-12 22:26:57 +00:00
|
|
|
entry->vaddr[1], entry->vaddr[0]);
|
2013-03-30 18:48:09 +00:00
|
|
|
data = DATAOF_TRACE_ENTRY(entry, true);
|
|
|
|
} else {
|
2014-11-12 22:26:57 +00:00
|
|
|
trace_seq_printf(s, "%s: (0x%lx)",
|
2015-05-13 18:20:14 +00:00
|
|
|
trace_event_name(&tu->tp.call),
|
2014-11-12 22:26:57 +00:00
|
|
|
entry->vaddr[0]);
|
2013-03-30 18:48:09 +00:00
|
|
|
data = DATAOF_TRACE_ENTRY(entry, false);
|
|
|
|
}
|
2012-04-11 10:30:43 +00:00
|
|
|
|
2013-07-03 06:42:53 +00:00
|
|
|
for (i = 0; i < tu->tp.nr_args; i++) {
|
|
|
|
struct probe_arg *parg = &tu->tp.args[i];
|
|
|
|
|
|
|
|
if (!parg->type->print(s, parg->name, data + parg->offset, entry))
|
2014-11-12 22:26:57 +00:00
|
|
|
goto out;
|
2012-04-11 10:30:43 +00:00
|
|
|
}
|
|
|
|
|
2014-11-12 22:26:57 +00:00
|
|
|
trace_seq_putc(s, '\n');
|
2012-04-11 10:30:43 +00:00
|
|
|
|
2014-11-12 22:26:57 +00:00
|
|
|
out:
|
|
|
|
return trace_handle_return(s);
|
2012-04-11 10:30:43 +00:00
|
|
|
}
|
|
|
|
|
2013-02-04 16:11:58 +00:00
|
|
|
typedef bool (*filter_func_t)(struct uprobe_consumer *self,
|
|
|
|
enum uprobe_filter_ctx ctx,
|
|
|
|
struct mm_struct *mm);
|
|
|
|
|
|
|
|
static int
|
2015-05-05 14:09:53 +00:00
|
|
|
probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
|
2014-01-17 08:08:38 +00:00
|
|
|
filter_func_t filter)
|
2012-04-11 10:30:43 +00:00
|
|
|
{
|
2014-01-17 08:08:38 +00:00
|
|
|
bool enabled = trace_probe_is_enabled(&tu->tp);
|
|
|
|
struct event_file_link *link = NULL;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (file) {
|
2014-06-27 17:01:36 +00:00
|
|
|
if (tu->tp.flags & TP_FLAG_PROFILE)
|
|
|
|
return -EINTR;
|
|
|
|
|
2014-01-17 08:08:38 +00:00
|
|
|
link = kmalloc(sizeof(*link), GFP_KERNEL);
|
|
|
|
if (!link)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
link->file = file;
|
|
|
|
list_add_tail_rcu(&link->list, &tu->tp.files);
|
|
|
|
|
|
|
|
tu->tp.flags |= TP_FLAG_TRACE;
|
2014-06-27 17:01:36 +00:00
|
|
|
} else {
|
|
|
|
if (tu->tp.flags & TP_FLAG_TRACE)
|
|
|
|
return -EINTR;
|
|
|
|
|
2014-01-17 08:08:38 +00:00
|
|
|
tu->tp.flags |= TP_FLAG_PROFILE;
|
2014-06-27 17:01:36 +00:00
|
|
|
}
|
2012-04-11 10:30:43 +00:00
|
|
|
|
2013-02-03 19:58:35 +00:00
|
|
|
WARN_ON(!uprobe_filter_is_empty(&tu->filter));
|
|
|
|
|
2014-01-17 08:08:38 +00:00
|
|
|
if (enabled)
|
|
|
|
return 0;
|
|
|
|
|
2014-06-27 17:01:46 +00:00
|
|
|
ret = uprobe_buffer_enable();
|
|
|
|
if (ret)
|
|
|
|
goto err_flags;
|
|
|
|
|
2013-02-04 16:11:58 +00:00
|
|
|
tu->consumer.filter = filter;
|
2018-04-23 17:21:34 +00:00
|
|
|
tu->inode = d_real_inode(tu->path.dentry);
|
2013-01-31 18:47:23 +00:00
|
|
|
ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
|
2014-06-27 17:01:46 +00:00
|
|
|
if (ret)
|
|
|
|
goto err_buffer;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_buffer:
|
|
|
|
uprobe_buffer_disable();
|
2012-04-11 10:30:43 +00:00
|
|
|
|
2014-06-27 17:01:46 +00:00
|
|
|
err_flags:
|
|
|
|
if (file) {
|
|
|
|
list_del(&link->list);
|
|
|
|
kfree(link);
|
|
|
|
tu->tp.flags &= ~TP_FLAG_TRACE;
|
|
|
|
} else {
|
|
|
|
tu->tp.flags &= ~TP_FLAG_PROFILE;
|
|
|
|
}
|
2013-01-27 17:36:24 +00:00
|
|
|
return ret;
|
2012-04-11 10:30:43 +00:00
|
|
|
}
|
|
|
|
|
2014-01-17 08:08:38 +00:00
|
|
|
/*
 * Disable the probe event for one user: an ftrace instance (@file != NULL)
 * or perf (@file == NULL). The uprobe is unregistered only when the last
 * user goes away.
 */
static void
probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
{
	if (!trace_probe_is_enabled(&tu->tp))
		return;

	if (file) {
		struct event_file_link *link;

		link = find_event_file_link(&tu->tp, file);
		if (!link)
			return;

		list_del_rcu(&link->list);
		/* synchronize with u{,ret}probe_trace_func */
		synchronize_rcu();
		kfree(link);

		/* other trace instances still reference this event */
		if (!list_empty(&tu->tp.files))
			return;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	/* inode is re-resolved from tu->path on the next enable */
	tu->inode = NULL;
	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;

	uprobe_buffer_disable();
}
|
|
|
|
|
2015-05-05 15:45:27 +00:00
|
|
|
/*
 * Describe the event's fields to the tracing core: the vaddr slot(s)
 * (func+ret_ip for a uretprobe, ip for a uprobe) followed by one field
 * per probe argument.
 */
static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = event_call->data;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}
	/* Set argument names as fields */
	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		/* argument data starts right after the fixed-size header */
		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name, size + parg->offset,
					 parg->type->size, parg->type->is_signed,
					 FILTER_OTHER);

		if (ret)
			return ret;
	}
	return 0;
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_PERF_EVENTS
|
2013-02-04 16:11:58 +00:00
|
|
|
static bool
|
|
|
|
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
|
|
|
|
{
|
|
|
|
struct perf_event *event;
|
|
|
|
|
|
|
|
if (filter->nr_systemwide)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
|
2015-03-05 21:10:19 +00:00
|
|
|
if (event->hw.target->mm == mm)
|
2013-02-04 16:11:58 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2013-02-04 18:05:43 +00:00
|
|
|
static inline bool
|
|
|
|
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
|
|
|
|
{
|
2015-03-05 21:10:19 +00:00
|
|
|
return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
|
2013-02-04 18:05:43 +00:00
|
|
|
}
|
|
|
|
|
2014-04-24 11:26:01 +00:00
|
|
|
/*
 * A perf event is detaching from this probe. Remove it from the filter
 * and, if no remaining event can still match (@done == false), drop the
 * installed breakpoints via uprobe_apply(..., false).
 */
static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		/* per-task event */
		list_del(&event->hw.tp_list);
		done = tu->filter.nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			uprobe_filter_event(tu, event);
	} else {
		/* system-wide event */
		tu->filter.nr_systemwide--;
		done = tu->filter.nr_systemwide;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

	return 0;
}
|
|
|
|
|
2014-04-24 11:26:01 +00:00
|
|
|
/*
 * A perf event is attaching to this probe. Add it to the filter and,
 * unless breakpoints are already guaranteed to be in place (@done),
 * install them via uprobe_apply(..., true). On failure the event is
 * rolled back with uprobe_perf_close().
 */
static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;
	int err;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = tu->filter.nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			uprobe_filter_event(tu, event);
		list_add(&event->hw.tp_list, &tu->filter.perf_events);
	} else {
		/* system-wide event */
		done = tu->filter.nr_systemwide;
		tu->filter.nr_systemwide++;
	}
	write_unlock(&tu->filter.rwlock);

	err = 0;
	if (!done) {
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err)
			uprobe_perf_close(tu, event);
	}
	return err;
}
|
|
|
|
|
2013-02-04 16:11:58 +00:00
|
|
|
static bool uprobe_perf_filter(struct uprobe_consumer *uc,
|
|
|
|
enum uprobe_filter_ctx ctx, struct mm_struct *mm)
|
|
|
|
{
|
|
|
|
struct trace_uprobe *tu;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
tu = container_of(uc, struct trace_uprobe, consumer);
|
|
|
|
read_lock(&tu->filter.rwlock);
|
|
|
|
ret = __uprobe_perf_filter(&tu->filter, mm);
|
|
|
|
read_unlock(&tu->filter.rwlock);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2014-01-17 08:08:36 +00:00
|
|
|
/*
 * Record one probe hit into the perf ring buffer.
 *
 * @func:  probed function address for a uretprobe (passed as 0 by the
 *         plain-uprobe path, where only the ip is recorded)
 * @ucb:   per-cpu buffer already holding the fetched argument data
 * @dsize: size of the dynamic (string etc.) part of that data
 */
static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = &tu->tp.call;
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	/* Give an attached BPF program the chance to filter out the hit. */
	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	/* Total record size, rounded for the perf buffer's u64 alignment. */
	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	/* Zero any padding introduced by the alignment above. */
	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
 out:
	preempt_enable();
}
|
|
|
|
|
|
|
|
/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	/* No interested perf event: ask uprobes to remove the breakpoint. */
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	/* uretprobes are recorded from uretprobe_perf_func() instead */
	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}
|
2013-03-30 17:25:23 +00:00
|
|
|
|
|
|
|
/* uretprobe profile handler; @func is the probed function's address */
static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}
|
bpf: introduce bpf subcommand BPF_TASK_FD_QUERY
Currently, suppose a userspace application has loaded a bpf program
and attached it to a tracepoint/kprobe/uprobe, and a bpf
introspection tool, e.g., bpftool, wants to show which bpf program
is attached to which tracepoint/kprobe/uprobe. Such attachment
information will be really useful to understand the overall bpf
deployment in the system.
There is a name field (16 bytes) for each program, which could
be used to encode the attachment point. There are some drawbacks
to this approach. First, a bpftool user (e.g., an admin) may not
really understand the association between the name and the
attachment point. Second, if one program is attached to multiple
places, encoding a proper name which can imply all these
attachments becomes difficult.
This patch introduces a new bpf subcommand BPF_TASK_FD_QUERY.
Given a pid and fd, if the <pid, fd> is associated with a
tracepoint/kprobe/uprobe perf event, BPF_TASK_FD_QUERY will return
. prog_id
. tracepoint name, or
. k[ret]probe funcname + offset or kernel addr, or
. u[ret]probe filename + offset
to the userspace.
The user can use "bpftool prog" to find more information about
bpf program itself with prog_id.
Acked-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
2018-05-24 18:21:09 +00:00
|
|
|
|
|
|
|
/*
 * bpf_get_uprobe_info - report u[ret]probe attachment info for
 * BPF_TASK_FD_QUERY.
 *
 * @event:                perf event the query was resolved to
 * @fd_type:              out: BPF_FD_TYPE_UPROBE or BPF_FD_TYPE_URETPROBE
 * @filename:             out: path of the probed file
 * @probe_offset:         out: offset of the probe within that file
 * @perf_type_tracepoint: true if the event's PMU type is tracepoint, in
 *                        which case the probe is looked up by name/group
 *                        instead of taken from the event's private data
 *
 * Returns 0 on success, -EINVAL if no trace_uprobe backs the event.
 */
int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
			const char **filename, u64 *probe_offset,
			bool perf_type_tracepoint)
{
	const char *pevent = trace_event_name(event->tp_event);
	const char *group = event->tp_event->class->system;
	struct trace_uprobe *tu;

	if (perf_type_tracepoint)
		tu = find_probe_event(pevent, group);
	else
		tu = event->tp_event->data;
	if (!tu)
		return -EINVAL;

	*fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
				    : BPF_FD_TYPE_UPROBE;
	*filename = tu->filename;
	*probe_offset = tu->offset;
	return 0;
}
|
2012-04-11 10:30:43 +00:00
|
|
|
#endif /* CONFIG_PERF_EVENTS */
|
|
|
|
|
2014-01-17 08:08:38 +00:00
|
|
|
/*
 * The trace_event_call ->reg() callback: route enable/disable and perf
 * open/close requests to the probe_event_*/uprobe_perf_* helpers.
 * @data is the trace_event_file for ftrace requests, the perf_event for
 * TRACE_REG_PERF_OPEN/CLOSE.
 */
static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_uprobe *tu = event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(tu, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(tu, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(tu, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(tu, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(tu, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(tu, data);

#endif
	default:
		return 0;
	}
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * uprobe_consumer handler invoked on every probe hit: fetch the argument
 * data once into a per-cpu buffer, then fan out to the ftrace and/or perf
 * handlers depending on which flags are set.
 */
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;


	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;		/* hit counter shown in uprobe_profile */

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	/* publish the per-hit dispatch data via the task's utask */
	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}
|
|
|
|
|
2013-03-30 17:25:23 +00:00
|
|
|
/*
 * uprobe_consumer return handler: same fan-out as uprobe_dispatcher()
 * but for function-return hits. @func is the address of the probed
 * function; regs hold the state at the return site.
 */
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	/* publish the per-hit dispatch data via the task's utask */
	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}
|
|
|
|
|
2012-04-11 10:30:43 +00:00
|
|
|
/* Output callbacks for the event; only text printing is provided. */
static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};
|
|
|
|
|
2017-12-06 22:45:16 +00:00
|
|
|
/*
 * Initialize the trace_event_call shared by normal (uprobe_events) and
 * local (perf-created) uprobe events.
 */
static inline void init_trace_event_call(struct trace_uprobe *tu,
					 struct trace_event_call *call)
{
	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	call->flags = TRACE_EVENT_FL_UPROBE;
	call->class->reg = trace_uprobe_register;
	call->data = tu;	/* back-pointer used by the callbacks above */
}
|
|
|
|
|
|
|
|
/*
 * Register the event with the trace event subsystem: build the print
 * format, allocate an event type id, and add the event call. All
 * allocations are unwound on failure.
 */
static int register_uprobe_event(struct trace_uprobe *tu)
{
	struct trace_event_call *call = &tu->tp.call;
	int ret = 0;

	init_trace_event_call(tu, call);

	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
		return -ENOMEM;

	/* register_trace_event() returns the event type id, 0 on failure */
	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}

	ret = trace_add_event_call(call);

	if (ret) {
		pr_info("Failed to register uprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}

	return ret;
}
|
|
|
|
|
2013-07-04 03:33:51 +00:00
|
|
|
/*
 * Undo register_uprobe_event(). May fail (non-zero return) if the event
 * call cannot be removed, in which case nothing is freed.
 */
static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	int ret;

	/* tu->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tu->tp.call);
	if (ret)
		return ret;
	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;
	return 0;
}
|
|
|
|
|
2017-12-06 22:45:16 +00:00
|
|
|
#ifdef CONFIG_PERF_EVENTS
|
|
|
|
struct trace_event_call *
|
|
|
|
create_local_trace_uprobe(char *name, unsigned long offs, bool is_return)
|
|
|
|
{
|
|
|
|
struct trace_uprobe *tu;
|
|
|
|
struct path path;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = kern_path(name, LOOKUP_FOLLOW, &path);
|
|
|
|
if (ret)
|
|
|
|
return ERR_PTR(ret);
|
|
|
|
|
2018-04-23 17:21:34 +00:00
|
|
|
if (!d_is_reg(path.dentry)) {
|
|
|
|
path_put(&path);
|
2017-12-06 22:45:16 +00:00
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* local trace_kprobes are not added to probe_list, so they are never
|
|
|
|
* searched in find_trace_kprobe(). Therefore, there is no concern of
|
|
|
|
* duplicated name "DUMMY_EVENT" here.
|
|
|
|
*/
|
|
|
|
tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
|
|
|
|
is_return);
|
|
|
|
|
|
|
|
if (IS_ERR(tu)) {
|
|
|
|
pr_info("Failed to allocate trace_uprobe.(%d)\n",
|
|
|
|
(int)PTR_ERR(tu));
|
2018-04-23 17:21:34 +00:00
|
|
|
path_put(&path);
|
2017-12-06 22:45:16 +00:00
|
|
|
return ERR_CAST(tu);
|
|
|
|
}
|
|
|
|
|
|
|
|
tu->offset = offs;
|
2018-04-23 17:21:34 +00:00
|
|
|
tu->path = path;
|
2017-12-06 22:45:16 +00:00
|
|
|
tu->filename = kstrdup(name, GFP_KERNEL);
|
|
|
|
init_trace_event_call(tu, &tu->tp.call);
|
|
|
|
|
|
|
|
if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
return &tu->tp.call;
|
|
|
|
error:
|
|
|
|
free_trace_uprobe(tu);
|
|
|
|
return ERR_PTR(ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Tear down an event created by create_local_trace_uprobe(). */
void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
	struct trace_uprobe *tu;

	tu = container_of(event_call, struct trace_uprobe, tp.call);

	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;

	free_trace_uprobe(tu);
}
|
|
|
|
#endif /* CONFIG_PERF_EVENTS */
|
|
|
|
|
2012-04-11 10:30:43 +00:00
|
|
|
/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	/* tracefs unavailable: silently skip, tracing still works */
	if (IS_ERR(d_tracer))
		return 0;

	/* Control interface: add/remove/list uprobe events */
	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);
|