// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */

#include <linux/init.h>
#include <linux/namei.h>
#include <linux/pid_namespace.h>
#include <linux/fs.h>
#include <linux/fdtable.h>
#include <linux/filter.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/btf_ids.h>
#include <linux/mm_types.h>
#include "mmap_unlock_work.h"

static const char * const iter_task_type_names[] = {
	"ALL",
	"TID",
	"PID",
};

struct bpf_iter_seq_task_common {
	struct pid_namespace *ns;
	enum bpf_iter_task_type type;
	u32 pid;
	u32 pid_visiting;
};

struct bpf_iter_seq_task_info {
	/* The first field must be struct bpf_iter_seq_task_common.
	 * This is assumed by the {init, fini}_seq_pidns() callback functions.
	 */
	struct bpf_iter_seq_task_common common;
	u32 tid;
};

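/* Walk the threads of the thread group selected by common->pid. On the
 * first call (*tid == 0) the group leader is returned; subsequent calls
 * resume from common->pid_visiting and advance with __next_thread().
 * Called with rcu_read_lock() held; every non-NULL return holds a
 * reference on the returned task_struct.
 */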
static struct task_struct *task_group_seq_get_next(struct bpf_iter_seq_task_common *common,
						   u32 *tid,
						   bool skip_if_dup_files)
{
	struct task_struct *task;
	struct pid *pid;
	u32 next_tid;

	if (!*tid) {
		/* The first time, the iterator calls this function. */
		pid = find_pid_ns(common->pid, common->ns);
		task = get_pid_task(pid, PIDTYPE_TGID);
		if (!task)
			return NULL;

		*tid = common->pid;
		common->pid_visiting = common->pid;

		return task;
	}

	/* If the control returns to user space and comes back to the
	 * kernel again, *tid and common->pid_visiting should be the
	 * same for task_seq_start() to pick up the correct task.
	 */
	if (*tid == common->pid_visiting) {
		pid = find_pid_ns(common->pid_visiting, common->ns);
		task = get_pid_task(pid, PIDTYPE_PID);

		return task;
	}

	task = find_task_by_pid_ns(common->pid_visiting, common->ns);
	if (!task)
		return NULL;

retry:
	task = __next_thread(task);
	if (!task)
		return NULL;

	next_tid = __task_pid_nr_ns(task, PIDTYPE_PID, common->ns);
	if (!next_tid)
		goto retry;

	if (skip_if_dup_files && task->files == task->group_leader->files)
		goto retry;

	*tid = common->pid_visiting = next_tid;
	get_task_struct(task);
	return task;
}

static struct task_struct *task_seq_get_next(struct bpf_iter_seq_task_common *common,
					     u32 *tid,
					     bool skip_if_dup_files)
{
	struct task_struct *task = NULL;
	struct pid *pid;

	if (common->type == BPF_TASK_ITER_TID) {
		if (*tid && *tid != common->pid)
			return NULL;
		rcu_read_lock();
		pid = find_pid_ns(common->pid, common->ns);
		if (pid) {
			task = get_pid_task(pid, PIDTYPE_TGID);
			*tid = common->pid;
		}
		rcu_read_unlock();

		return task;
	}

	if (common->type == BPF_TASK_ITER_TGID) {
		rcu_read_lock();
		task = task_group_seq_get_next(common, tid, skip_if_dup_files);
		rcu_read_unlock();

		return task;
	}

	rcu_read_lock();
retry:
	pid = find_ge_pid(*tid, common->ns);
	if (pid) {
		*tid = pid_nr_ns(pid, common->ns);
		task = get_pid_task(pid, PIDTYPE_PID);
		if (!task) {
			++*tid;
			goto retry;
		} else if (skip_if_dup_files && !thread_group_leader(task) &&
			   task->files == task->group_leader->files) {
			put_task_struct(task);
			task = NULL;
			++*tid;
			goto retry;
		}
	}
	rcu_read_unlock();

	return task;
}

static void *task_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_task_info *info = seq->private;
	struct task_struct *task;

	task = task_seq_get_next(&info->common, &info->tid, false);
	if (!task)
		return NULL;

	if (*pos == 0)
		++*pos;
	return task;
}

static void *task_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_task_info *info = seq->private;
	struct task_struct *task;

	++*pos;
	++info->tid;
	put_task_struct((struct task_struct *)v);
	task = task_seq_get_next(&info->common, &info->tid, false);
	if (!task)
		return NULL;

	return task;
}

struct bpf_iter__task {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct task_struct *, task);
};

DEFINE_BPF_ITER_FUNC(task, struct bpf_iter_meta *meta, struct task_struct *task)

static int __task_seq_show(struct seq_file *seq, struct task_struct *task,
			   bool in_stop)
{
	struct bpf_iter_meta meta;
	struct bpf_iter__task ctx;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.task = task;
	return bpf_iter_run_prog(prog, &ctx);
}

static int task_seq_show(struct seq_file *seq, void *v)
{
	return __task_seq_show(seq, v, false);
}

static void task_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__task_seq_show(seq, v, true);
	else
		put_task_struct((struct task_struct *)v);
}

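/* Validate the link parameters at attach time: tid, pid and pid_fd are
 * mutually exclusive. tid selects a single thread (BPF_TASK_ITER_TID),
 * while pid or pid_fd select a whole thread group (BPF_TASK_ITER_TGID);
 * with none of them set, every task in the system is visited.
 */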
static int bpf_iter_attach_task(struct bpf_prog *prog,
				union bpf_iter_link_info *linfo,
				struct bpf_iter_aux_info *aux)
{
	unsigned int flags;
	struct pid *pid;
	pid_t tgid;

	if ((!!linfo->task.tid + !!linfo->task.pid + !!linfo->task.pid_fd) > 1)
		return -EINVAL;

	aux->task.type = BPF_TASK_ITER_ALL;
	if (linfo->task.tid != 0) {
		aux->task.type = BPF_TASK_ITER_TID;
		aux->task.pid = linfo->task.tid;
	}
	if (linfo->task.pid != 0) {
		aux->task.type = BPF_TASK_ITER_TGID;
		aux->task.pid = linfo->task.pid;
	}
	if (linfo->task.pid_fd != 0) {
		aux->task.type = BPF_TASK_ITER_TGID;

		pid = pidfd_get_pid(linfo->task.pid_fd, &flags);
		if (IS_ERR(pid))
			return PTR_ERR(pid);

		tgid = pid_nr_ns(pid, task_active_pid_ns(current));
		aux->task.pid = tgid;
		put_pid(pid);
	}

	return 0;
}

static const struct seq_operations task_seq_ops = {
	.start = task_seq_start,
	.next = task_seq_next,
	.stop = task_seq_stop,
	.show = task_seq_show,
};

struct bpf_iter_seq_task_file_info {
	/* The first field must be struct bpf_iter_seq_task_common.
	 * This is assumed by the {init, fini}_seq_pidns() callback functions.
	 */
	struct bpf_iter_seq_task_common common;
	struct task_struct *task;
	u32 tid;
	u32 fd;
};

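/* Advance to the next open file of the current task, moving on to the
 * next task once the current fd table is exhausted. saved_tid lets the
 * fd scan restart from 0 whenever the iteration switches to a new task.
 */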
static struct file *
task_file_seq_get_next(struct bpf_iter_seq_task_file_info *info)
{
	u32 saved_tid = info->tid;
	struct task_struct *curr_task;
	unsigned int curr_fd = info->fd;
	struct file *f;

	/* If this function returns a non-NULL file object,
	 * it holds a reference to the task/file.
	 * Otherwise, it does not hold any reference.
	 */
again:
	if (info->task) {
		curr_task = info->task;
		curr_fd = info->fd;
	} else {
		curr_task = task_seq_get_next(&info->common, &info->tid, true);
		if (!curr_task) {
			info->task = NULL;
			return NULL;
		}

		/* set info->task */
		info->task = curr_task;
		if (saved_tid == info->tid)
			curr_fd = info->fd;
		else
			curr_fd = 0;
	}

	rcu_read_lock();
	f = task_lookup_next_fdget_rcu(curr_task, &curr_fd);
	if (f) {
		/* set info->fd */
		info->fd = curr_fd;
		rcu_read_unlock();
		return f;
	}

	/* the current task is done, go to the next task */
	rcu_read_unlock();
	put_task_struct(curr_task);

	if (info->common.type == BPF_TASK_ITER_TID) {
		info->task = NULL;
		return NULL;
	}

	info->task = NULL;
	info->fd = 0;
	saved_tid = ++(info->tid);
	goto again;
}

static void *task_file_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;
	struct file *file;

	info->task = NULL;
	file = task_file_seq_get_next(info);
	if (file && *pos == 0)
		++*pos;

	return file;
}

static void *task_file_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;

	++*pos;
	++info->fd;
	fput((struct file *)v);
	return task_file_seq_get_next(info);
}

struct bpf_iter__task_file {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct task_struct *, task);
	u32 fd __aligned(8);
	__bpf_md_ptr(struct file *, file);
};

DEFINE_BPF_ITER_FUNC(task_file, struct bpf_iter_meta *meta,
		     struct task_struct *task, u32 fd,
		     struct file *file)

static int __task_file_seq_show(struct seq_file *seq, struct file *file,
				bool in_stop)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;
	struct bpf_iter__task_file ctx;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.task = info->task;
	ctx.fd = info->fd;
	ctx.file = file;
	return bpf_iter_run_prog(prog, &ctx);
}

static int task_file_seq_show(struct seq_file *seq, void *v)
{
	return __task_file_seq_show(seq, v, false);
}

static void task_file_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;

	if (!v) {
		(void)__task_file_seq_show(seq, v, true);
	} else {
		fput((struct file *)v);
		put_task_struct(info->task);
		info->task = NULL;
	}
}

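/* Shared init/fini for all three seq_files: pin the attaching process'
 * active pid namespace and copy the attach-time parameters into the
 * common header of the seq_file private data.
 */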
static int init_seq_pidns(void *priv_data, struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_task_common *common = priv_data;

	common->ns = get_pid_ns(task_active_pid_ns(current));
	common->type = aux->task.type;
	common->pid = aux->task.pid;

	return 0;
}

static void fini_seq_pidns(void *priv_data)
{
	struct bpf_iter_seq_task_common *common = priv_data;

	put_pid_ns(common->ns);
}

static const struct seq_operations task_file_seq_ops = {
	.start = task_file_seq_start,
	.next = task_file_seq_next,
	.stop = task_file_seq_stop,
	.show = task_file_seq_show,
};

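/* prev_vm_start/prev_vm_end record the range of the vma handed out last,
 * so that iteration can resume with find_vma() after mmap_lock has been
 * dropped (on buffer boundaries or lock contention).
 */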
struct bpf_iter_seq_task_vma_info {
	/* The first field must be struct bpf_iter_seq_task_common.
	 * This is assumed by the {init, fini}_seq_pidns() callback functions.
	 */
	struct bpf_iter_seq_task_common common;
	struct task_struct *task;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	u32 tid;
	unsigned long prev_vm_start;
	unsigned long prev_vm_end;
};

enum bpf_task_vma_iter_find_op {
	task_vma_iter_first_vma,   /* use find_vma() with addr 0 */
	task_vma_iter_next_vma,    /* use vma_next() with curr_vma */
	task_vma_iter_find_vma,    /* use find_vma() to find next vma */
};

static struct vm_area_struct *
task_vma_seq_get_next(struct bpf_iter_seq_task_vma_info *info)
{
	enum bpf_task_vma_iter_find_op op;
	struct vm_area_struct *curr_vma;
	struct task_struct *curr_task;
	struct mm_struct *curr_mm;
	u32 saved_tid = info->tid;

	/* If this function returns a non-NULL vma, it holds a reference to
	 * the task_struct, holds a refcount on mm->mm_users, and holds
	 * read lock on vma->mm->mmap_lock.
	 * If this function returns NULL, it does not hold any reference or
	 * lock.
	 */
	if (info->task) {
		curr_task = info->task;
		curr_vma = info->vma;
		curr_mm = info->mm;
		/* In case of lock contention, drop mmap_lock to unblock
		 * the writer.
		 *
		 * After relock, call find_vma(mm, prev_vm_end - 1) to find
		 * the new vma to process.
		 *
		 *    +------+------+-----------+
		 *    | VMA1 | VMA2 | VMA3      |
		 *    +------+------+-----------+
		 *    |      |      |           |
		 *   4k     8k    16k         400k
		 *
		 * For example, curr_vma == VMA2. Before unlock, we set
		 *
		 *    prev_vm_start = 8k
		 *    prev_vm_end   = 16k
		 *
		 * There are a few cases:
		 *
		 * 1) VMA2 is freed, but VMA3 exists.
		 *
		 *    find_vma() will return VMA3, just process VMA3.
		 *
		 * 2) VMA2 still exists.
		 *
		 *    find_vma() will return VMA2, process VMA2->next.
		 *
		 * 3) no more vma in this mm.
		 *
		 *    Process the next task.
		 *
		 * 4) find_vma() returns a different vma, VMA2'.
		 *
		 *    4.1) If VMA2 covers the same range as VMA2', skip VMA2',
		 *         because we already covered the range;
		 *    4.2) VMA2 and VMA2' cover different ranges, process
		 *         VMA2'.
		 */
		if (mmap_lock_is_contended(curr_mm)) {
			info->prev_vm_start = curr_vma->vm_start;
			info->prev_vm_end = curr_vma->vm_end;
			op = task_vma_iter_find_vma;
			mmap_read_unlock(curr_mm);
			if (mmap_read_lock_killable(curr_mm)) {
				mmput(curr_mm);
				goto finish;
			}
		} else {
			op = task_vma_iter_next_vma;
		}
	} else {
again:
		curr_task = task_seq_get_next(&info->common, &info->tid, true);
		if (!curr_task) {
			info->tid++;
			goto finish;
		}

		if (saved_tid != info->tid) {
			/* new task, process the first vma */
			op = task_vma_iter_first_vma;
		} else {
			/* Found the same tid, which means user space has
			 * finished the data in the previous buffer and is
			 * reading more. We dropped mmap_lock before
			 * returning to user space, so it is necessary to
			 * use find_vma() to find the next vma to process.
			 */
			op = task_vma_iter_find_vma;
		}

		curr_mm = get_task_mm(curr_task);
		if (!curr_mm)
			goto next_task;

		if (mmap_read_lock_killable(curr_mm)) {
			mmput(curr_mm);
			goto finish;
		}
	}

	switch (op) {
	case task_vma_iter_first_vma:
		curr_vma = find_vma(curr_mm, 0);
		break;
	case task_vma_iter_next_vma:
		curr_vma = find_vma(curr_mm, curr_vma->vm_end);
		break;
	case task_vma_iter_find_vma:
		/* We dropped mmap_lock so it is necessary to use find_vma
		 * to find the next vma. This is similar to the mechanism
		 * in show_smaps_rollup().
		 */
		curr_vma = find_vma(curr_mm, info->prev_vm_end - 1);
		/* case 1) and 4.2) above just use curr_vma */

		/* check for case 2) or case 4.1) above */
		if (curr_vma &&
		    curr_vma->vm_start == info->prev_vm_start &&
		    curr_vma->vm_end == info->prev_vm_end)
			curr_vma = find_vma(curr_mm, curr_vma->vm_end);
		break;
	}
	if (!curr_vma) {
		/* case 3) above, or case 2) 4.1) with vma->next == NULL */
		mmap_read_unlock(curr_mm);
		mmput(curr_mm);
		goto next_task;
	}
	info->task = curr_task;
	info->vma = curr_vma;
	info->mm = curr_mm;
	return curr_vma;

next_task:
	if (info->common.type == BPF_TASK_ITER_TID)
		goto finish;

	put_task_struct(curr_task);
	info->task = NULL;
	info->mm = NULL;
	info->tid++;
	goto again;

finish:
	if (curr_task)
		put_task_struct(curr_task);
	info->task = NULL;
	info->vma = NULL;
	info->mm = NULL;
	return NULL;
}

static void *task_vma_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_task_vma_info *info = seq->private;
	struct vm_area_struct *vma;

	vma = task_vma_seq_get_next(info);
	if (vma && *pos == 0)
		++*pos;

	return vma;
}

static void *task_vma_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_task_vma_info *info = seq->private;

	++*pos;
	return task_vma_seq_get_next(info);
}

struct bpf_iter__task_vma {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct task_struct *, task);
	__bpf_md_ptr(struct vm_area_struct *, vma);
};

DEFINE_BPF_ITER_FUNC(task_vma, struct bpf_iter_meta *meta,
		     struct task_struct *task, struct vm_area_struct *vma)

static int __task_vma_seq_show(struct seq_file *seq, bool in_stop)
{
	struct bpf_iter_seq_task_vma_info *info = seq->private;
	struct bpf_iter__task_vma ctx;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.task = info->task;
	ctx.vma = info->vma;
	return bpf_iter_run_prog(prog, &ctx);
}

static int task_vma_seq_show(struct seq_file *seq, void *v)
{
	return __task_vma_seq_show(seq, false);
}

static void task_vma_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_task_vma_info *info = seq->private;

	if (!v) {
		(void)__task_vma_seq_show(seq, true);
	} else {
		/* info->vma has not been seen by the BPF program. If the
		 * user space reads more, task_vma_seq_get_next should
		 * return this vma again. Set prev_vm_start to ~0UL,
		 * so that we don't skip the vma returned by the next
		 * find_vma() (case task_vma_iter_find_vma in
		 * task_vma_seq_get_next()).
		 */
		info->prev_vm_start = ~0UL;
		info->prev_vm_end = info->vma->vm_end;
		mmap_read_unlock(info->mm);
		mmput(info->mm);
		info->mm = NULL;
		put_task_struct(info->task);
		info->task = NULL;
	}
}

static const struct seq_operations task_vma_seq_ops = {
	.start = task_vma_seq_start,
	.next = task_vma_seq_next,
	.stop = task_vma_seq_stop,
	.show = task_vma_seq_show,
};

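/* Registration of the "task", "task_file" and "task_vma" bpf_iter targets.
 * All three reuse bpf_iter_attach_task(), report their attach parameters
 * through fill_link_info/show_fdinfo, and allow rescheduling in
 * bpf_seq_read() via BPF_ITER_RESCHED.
 */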
static const struct bpf_iter_seq_info task_seq_info = {
	.seq_ops = &task_seq_ops,
	.init_seq_private = init_seq_pidns,
	.fini_seq_private = fini_seq_pidns,
	.seq_priv_size = sizeof(struct bpf_iter_seq_task_info),
};

static int bpf_iter_fill_link_info(const struct bpf_iter_aux_info *aux, struct bpf_link_info *info)
{
	switch (aux->task.type) {
	case BPF_TASK_ITER_TID:
		info->iter.task.tid = aux->task.pid;
		break;
	case BPF_TASK_ITER_TGID:
		info->iter.task.pid = aux->task.pid;
		break;
	default:
		break;
	}
	return 0;
}

static void bpf_iter_task_show_fdinfo(const struct bpf_iter_aux_info *aux, struct seq_file *seq)
{
	seq_printf(seq, "task_type:\t%s\n", iter_task_type_names[aux->task.type]);
	if (aux->task.type == BPF_TASK_ITER_TID)
		seq_printf(seq, "tid:\t%u\n", aux->task.pid);
	else if (aux->task.type == BPF_TASK_ITER_TGID)
		seq_printf(seq, "pid:\t%u\n", aux->task.pid);
}

static struct bpf_iter_reg task_reg_info = {
	.target = "task",
	.attach_target = bpf_iter_attach_task,
	.feature = BPF_ITER_RESCHED,
	.ctx_arg_info_size = 1,
	.ctx_arg_info = {
		{ offsetof(struct bpf_iter__task, task),
		  PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
	},
	.seq_info = &task_seq_info,
	.fill_link_info = bpf_iter_fill_link_info,
	.show_fdinfo = bpf_iter_task_show_fdinfo,
};

static const struct bpf_iter_seq_info task_file_seq_info = {
	.seq_ops = &task_file_seq_ops,
	.init_seq_private = init_seq_pidns,
	.fini_seq_private = fini_seq_pidns,
	.seq_priv_size = sizeof(struct bpf_iter_seq_task_file_info),
};

static struct bpf_iter_reg task_file_reg_info = {
	.target = "task_file",
	.attach_target = bpf_iter_attach_task,
	.feature = BPF_ITER_RESCHED,
	.ctx_arg_info_size = 2,
	.ctx_arg_info = {
		{ offsetof(struct bpf_iter__task_file, task),
		  PTR_TO_BTF_ID_OR_NULL },
		{ offsetof(struct bpf_iter__task_file, file),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info = &task_file_seq_info,
	.fill_link_info = bpf_iter_fill_link_info,
	.show_fdinfo = bpf_iter_task_show_fdinfo,
};

static const struct bpf_iter_seq_info task_vma_seq_info = {
	.seq_ops = &task_vma_seq_ops,
	.init_seq_private = init_seq_pidns,
	.fini_seq_private = fini_seq_pidns,
	.seq_priv_size = sizeof(struct bpf_iter_seq_task_vma_info),
};

static struct bpf_iter_reg task_vma_reg_info = {
	.target = "task_vma",
	.attach_target = bpf_iter_attach_task,
	.feature = BPF_ITER_RESCHED,
	.ctx_arg_info_size = 2,
	.ctx_arg_info = {
		{ offsetof(struct bpf_iter__task_vma, task),
		  PTR_TO_BTF_ID_OR_NULL },
		{ offsetof(struct bpf_iter__task_vma, vma),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info = &task_vma_seq_info,
	.fill_link_info = bpf_iter_fill_link_info,
	.show_fdinfo = bpf_iter_task_show_fdinfo,
};

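/* bpf_find_vma() helper: look up the vma covering @start in @task's mm and
 * run @callback_fn on it under mmap_read_lock. Returns -EBUSY when the
 * lock (or the per-cpu irq_work used to release it) is unavailable, so
 * BPF callers may need to retry.
 */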
BPF_CALL_5(bpf_find_vma, struct task_struct *, task, u64, start,
	   bpf_callback_t, callback_fn, void *, callback_ctx, u64, flags)
{
	struct mmap_unlock_irq_work *work = NULL;
	struct vm_area_struct *vma;
	bool irq_work_busy = false;
	struct mm_struct *mm;
	int ret = -ENOENT;

	if (flags)
		return -EINVAL;

	if (!task)
		return -ENOENT;

	mm = task->mm;
	if (!mm)
		return -ENOENT;

	irq_work_busy = bpf_mmap_unlock_get_irq_work(&work);

	if (irq_work_busy || !mmap_read_trylock(mm))
		return -EBUSY;

	vma = find_vma(mm, start);

	if (vma && vma->vm_start <= start && vma->vm_end > start) {
		callback_fn((u64)(long)task, (u64)(long)vma,
			    (u64)(long)callback_ctx, 0, 0);
		ret = 0;
	}
	bpf_mmap_unlock_mm(work, mm);
	return ret;
}

const struct bpf_func_proto bpf_find_vma_proto = {
	.func = bpf_find_vma,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_FUNC,
	.arg4_type = ARG_PTR_TO_STACK_OR_NULL,
	.arg5_type = ARG_ANYTHING,
};

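/* Open-coded task_vma iterator kfuncs. bpf_iter_task_vma_new() takes a
 * reference on the task and read-locks its mmap_lock;
 * bpf_iter_task_vma_destroy() releases both. A BPF program would
 * typically drive these through a convenience macro rather than calling
 * the kfuncs directly, e.g. (sketch, using the bpf_for_each() macro
 * shipped with the BPF selftests' bpf_experimental.h):
 *
 *	struct vm_area_struct *vma;
 *
 *	bpf_for_each(task_vma, vma, task, 0)
 *		total += vma->vm_end - vma->vm_start;
 */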
struct bpf_iter_task_vma_kern_data {
	struct task_struct *task;
	struct mm_struct *mm;
	struct mmap_unlock_irq_work *work;
	struct vma_iterator vmi;
};

struct bpf_iter_task_vma {
	/* opaque iterator state; using __u64 here preserves the correct
	 * alignment requirements in vmlinux.h, generated from BTF
	 */
	__u64 __opaque[1];
} __attribute__((aligned(8)));

/* Non-opaque version of bpf_iter_task_vma */
struct bpf_iter_task_vma_kern {
	struct bpf_iter_task_vma_kern_data *data;
} __attribute__((aligned(8)));

__bpf_kfunc_start_defs();

__bpf_kfunc int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
				      struct task_struct *task, u64 addr)
{
	struct bpf_iter_task_vma_kern *kit = (void *)it;
	bool irq_work_busy = false;
	int err;

	BUILD_BUG_ON(sizeof(struct bpf_iter_task_vma_kern) != sizeof(struct bpf_iter_task_vma));
	BUILD_BUG_ON(__alignof__(struct bpf_iter_task_vma_kern) != __alignof__(struct bpf_iter_task_vma));

	/* is_iter_reg_valid_uninit guarantees that kit hasn't been initialized
	 * before, so non-NULL kit->data doesn't point to previously
	 * bpf_mem_alloc'd bpf_iter_task_vma_kern_data
	 */
	kit->data = bpf_mem_alloc(&bpf_global_ma, sizeof(struct bpf_iter_task_vma_kern_data));
	if (!kit->data)
		return -ENOMEM;

	kit->data->task = get_task_struct(task);
	kit->data->mm = task->mm;
	if (!kit->data->mm) {
		err = -ENOENT;
		goto err_cleanup_iter;
	}

	/* kit->data->work == NULL is valid after bpf_mmap_unlock_get_irq_work */
	irq_work_busy = bpf_mmap_unlock_get_irq_work(&kit->data->work);
	if (irq_work_busy || !mmap_read_trylock(kit->data->mm)) {
		err = -EBUSY;
		goto err_cleanup_iter;
	}

	vma_iter_init(&kit->data->vmi, kit->data->mm, addr);
	return 0;

err_cleanup_iter:
	if (kit->data->task)
		put_task_struct(kit->data->task);
	bpf_mem_free(&bpf_global_ma, kit->data);
	/* NULL kit->data signals failed bpf_iter_task_vma initialization */
	kit->data = NULL;
	return err;
}

__bpf_kfunc struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it)
{
	struct bpf_iter_task_vma_kern *kit = (void *)it;

	if (!kit->data) /* bpf_iter_task_vma_new failed */
		return NULL;
	return vma_next(&kit->data->vmi);
}

__bpf_kfunc void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it)
{
	struct bpf_iter_task_vma_kern *kit = (void *)it;

	if (kit->data) {
		bpf_mmap_unlock_mm(kit->data->work, kit->data->mm);
		put_task_struct(kit->data->task);
		bpf_mem_free(&bpf_global_ma, kit->data);
	}
}

__bpf_kfunc_end_defs();

#ifdef CONFIG_CGROUPS

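/* Open-coded css_task iterator kfuncs: iterate the tasks attached to a
 * cgroup_subsys_state. The iterator state is a css_task_iter allocated
 * from bpf_global_ma; only the CSS_TASK_ITER_PROCS / CSS_TASK_ITER_THREADED
 * flag combinations accepted below are allowed.
 */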
struct bpf_iter_css_task {
	__u64 __opaque[1];
} __attribute__((aligned(8)));

struct bpf_iter_css_task_kern {
	struct css_task_iter *css_it;
} __attribute__((aligned(8)));

__bpf_kfunc_start_defs();

__bpf_kfunc int bpf_iter_css_task_new(struct bpf_iter_css_task *it,
		struct cgroup_subsys_state *css, unsigned int flags)
{
	struct bpf_iter_css_task_kern *kit = (void *)it;

	BUILD_BUG_ON(sizeof(struct bpf_iter_css_task_kern) != sizeof(struct bpf_iter_css_task));
	BUILD_BUG_ON(__alignof__(struct bpf_iter_css_task_kern) !=
					__alignof__(struct bpf_iter_css_task));
	kit->css_it = NULL;
	switch (flags) {
	case CSS_TASK_ITER_PROCS | CSS_TASK_ITER_THREADED:
	case CSS_TASK_ITER_PROCS:
	case 0:
		break;
	default:
		return -EINVAL;
	}

	kit->css_it = bpf_mem_alloc(&bpf_global_ma, sizeof(struct css_task_iter));
	if (!kit->css_it)
		return -ENOMEM;
	css_task_iter_start(css, flags, kit->css_it);
	return 0;
}

__bpf_kfunc struct task_struct *bpf_iter_css_task_next(struct bpf_iter_css_task *it)
{
	struct bpf_iter_css_task_kern *kit = (void *)it;

	if (!kit->css_it)
		return NULL;
	return css_task_iter_next(kit->css_it);
}

__bpf_kfunc void bpf_iter_css_task_destroy(struct bpf_iter_css_task *it)
{
	struct bpf_iter_css_task_kern *kit = (void *)it;

	if (!kit->css_it)
		return;
	css_task_iter_end(kit->css_it);
	bpf_mem_free(&bpf_global_ma, kit->css_it);
}

__bpf_kfunc_end_defs();

#endif /* CONFIG_CGROUPS */

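/* Open-coded task iterator kfuncs. Depending on the flags passed to
 * bpf_iter_task_new(), the iterator visits every process in the system
 * (BPF_TASK_ITER_ALL_PROCS), every thread in the system
 * (BPF_TASK_ITER_ALL_THREADS), or every thread of one given process
 * (BPF_TASK_ITER_PROC_THREADS).
 */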
struct bpf_iter_task {
	__u64 __opaque[3];
} __attribute__((aligned(8)));

struct bpf_iter_task_kern {
	struct task_struct *task;
	struct task_struct *pos;
	unsigned int flags;
} __attribute__((aligned(8)));

enum {
	/* all processes in the system */
	BPF_TASK_ITER_ALL_PROCS,
	/* all threads in the system */
	BPF_TASK_ITER_ALL_THREADS,
	/* all threads of a specific process */
	BPF_TASK_ITER_PROC_THREADS
};

__bpf_kfunc_start_defs();

__bpf_kfunc int bpf_iter_task_new(struct bpf_iter_task *it,
		struct task_struct *task__nullable, unsigned int flags)
{
	struct bpf_iter_task_kern *kit = (void *)it;

	BUILD_BUG_ON(sizeof(struct bpf_iter_task_kern) > sizeof(struct bpf_iter_task));
	BUILD_BUG_ON(__alignof__(struct bpf_iter_task_kern) !=
				 __alignof__(struct bpf_iter_task));

	kit->pos = NULL;

	switch (flags) {
	case BPF_TASK_ITER_ALL_THREADS:
	case BPF_TASK_ITER_ALL_PROCS:
		break;
	case BPF_TASK_ITER_PROC_THREADS:
		if (!task__nullable)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (flags == BPF_TASK_ITER_PROC_THREADS)
		kit->task = task__nullable;
	else
		kit->task = &init_task;
	kit->pos = kit->task;
	kit->flags = flags;
	return 0;
}

__bpf_kfunc struct task_struct *bpf_iter_task_next(struct bpf_iter_task *it)
{
	struct bpf_iter_task_kern *kit = (void *)it;
	struct task_struct *pos;
	unsigned int flags;

	flags = kit->flags;
	pos = kit->pos;

	if (!pos)
		return pos;

	if (flags == BPF_TASK_ITER_ALL_PROCS)
		goto get_next_task;

	kit->pos = __next_thread(kit->pos);
	if (kit->pos || flags == BPF_TASK_ITER_PROC_THREADS)
		return pos;

get_next_task:
	kit->task = next_task(kit->task);
	if (kit->task == &init_task)
		kit->pos = NULL;
	else
		kit->pos = kit->task;

	return pos;
}

__bpf_kfunc void bpf_iter_task_destroy(struct bpf_iter_task *it)
{
}

__bpf_kfunc_end_defs();

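/* Per-cpu irq_work used by bpf_mmap_unlock_mm(): when a BPF program runs
 * in a context where mmap_lock cannot be released directly, the unlock is
 * deferred to do_mmap_read_unlock() in irq_work context.
 */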
DEFINE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work);

static void do_mmap_read_unlock(struct irq_work *entry)
{
	struct mmap_unlock_irq_work *work;

	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
		return;

	work = container_of(entry, struct mmap_unlock_irq_work, irq_work);
	mmap_read_unlock_non_owner(work->mm);
}

static int __init task_iter_init(void)
{
	struct mmap_unlock_irq_work *work;
	int ret, cpu;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&mmap_unlock_work, cpu);
		init_irq_work(&work->irq_work, do_mmap_read_unlock);
	}

	task_reg_info.ctx_arg_info[0].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_TASK];
	ret = bpf_iter_reg_target(&task_reg_info);
	if (ret)
		return ret;

	task_file_reg_info.ctx_arg_info[0].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_TASK];
	task_file_reg_info.ctx_arg_info[1].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_FILE];
	ret = bpf_iter_reg_target(&task_file_reg_info);
	if (ret)
		return ret;

	task_vma_reg_info.ctx_arg_info[0].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_TASK];
	task_vma_reg_info.ctx_arg_info[1].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA];
	return bpf_iter_reg_target(&task_vma_reg_info);
}
late_initcall(task_iter_init);