#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

#include <linux/refcount.h>

struct io_wq;

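/* io_wq_work->flags bits; bits from IO_WQ_HASH_SHIFT upwards carry the hash key */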
enum {
	IO_WQ_WORK_CANCEL	= 1,
	IO_WQ_WORK_HASHED	= 2,
	IO_WQ_WORK_UNBOUND	= 4,
	IO_WQ_WORK_CONCURRENT	= 16,

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};

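/* result of a cancellation attempt, see io_wq_cancel_cb() */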
enum io_wq_cancel {
	IO_WQ_CANCEL_OK,	/* cancelled before started */
	IO_WQ_CANCEL_RUNNING,	/* found, running, and attempted cancelled */
	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
};

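/*
 * Minimal intrusive singly linked list: work items embed a node, and the
 * list keeps both head and tail pointers for O(1) append.
 */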
struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};

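/* walk the list; @prv tracks the previous node so entries can later be cut or deleted */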
#define wq_list_for_each(pos, prv, head) \
	for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)

#define wq_list_for_each_resume(pos, prv) \
	for (; pos; prv = pos, pos = (pos)->next)

#define wq_list_empty(list)	(READ_ONCE((list)->first) == NULL)

#define INIT_WQ_LIST(list)	do {	\
	(list)->first = NULL;		\
} while (0)

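/* link @node in after @pos, updating the tail pointer if @pos was the last entry */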
static inline void wq_list_add_after(struct io_wq_work_node *node,
				     struct io_wq_work_node *pos,
				     struct io_wq_work_list *list)
{
	struct io_wq_work_node *next = pos->next;

	pos->next = node;
	node->next = next;
	if (!next)
		list->last = node;
}

/**
 * wq_list_merge - merge the second list into the first one
 * @list0: the first list
 * @list1: the second list
 *
 * Both lists are reinitialised; return the first node of the merged list.
 */
static inline struct io_wq_work_node *wq_list_merge(struct io_wq_work_list *list0,
						    struct io_wq_work_list *list1)
{
	struct io_wq_work_node *ret;

	if (!list0->first) {
		ret = list1->first;
	} else {
		ret = list0->first;
		list0->last->next = list1->first;
	}
	INIT_WQ_LIST(list0);
	INIT_WQ_LIST(list1);
	return ret;
}

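/* append @node at the tail; WRITE_ONCE() pairs with the READ_ONCE() in wq_list_empty() */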
static inline void wq_list_add_tail(struct io_wq_work_node *node,
				    struct io_wq_work_list *list)
{
	node->next = NULL;
	if (!list->first) {
		list->last = node;
		WRITE_ONCE(list->first, node);
	} else {
		list->last->next = node;
		list->last = node;
	}
}

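/* insert @node at the head; it also becomes the tail if the list was empty */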
static inline void wq_list_add_head(struct io_wq_work_node *node,
				    struct io_wq_work_list *list)
{
	node->next = list->first;
	if (!node->next)
		list->last = node;
	WRITE_ONCE(list->first, node);
}

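/* detach everything after @prev up to and including @last; @prev == NULL cuts from the head */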
static inline void wq_list_cut(struct io_wq_work_list *list,
			       struct io_wq_work_node *last,
			       struct io_wq_work_node *prev)
{
	/* first in the list, if prev==NULL */
	if (!prev)
		WRITE_ONCE(list->first, last->next);
	else
		prev->next = last->next;

	if (last == list->last)
		list->last = prev;
	last->next = NULL;
}

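/* splice all of @list in right after @to and reinitialise @list; the __ variant requires a non-empty list */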
static inline void __wq_list_splice(struct io_wq_work_list *list,
				    struct io_wq_work_node *to)
{
	list->last->next = to->next;
	to->next = list->first;
	INIT_WQ_LIST(list);
}

static inline bool wq_list_splice(struct io_wq_work_list *list,
				  struct io_wq_work_node *to)
{
	if (!wq_list_empty(list)) {
		__wq_list_splice(list, to);
		return true;
	}
	return false;
}

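/* use a lone io_wq_work_node as a stack head and push @node on top of it */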
static inline void wq_stack_add_head(struct io_wq_work_node *node,
				     struct io_wq_work_node *stack)
{
	node->next = stack->next;
	stack->next = node;
}

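/* remove @node, where @prev is its predecessor (NULL if @node is the first entry) */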
static inline void wq_list_del(struct io_wq_work_list *list,
			       struct io_wq_work_node *node,
			       struct io_wq_work_node *prev)
{
	wq_list_cut(list, node, prev);
}

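/* pop the top node off the stack; callers must ensure the stack is not empty */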
static inline
struct io_wq_work_node *wq_stack_extract(struct io_wq_work_node *stack)
{
	struct io_wq_work_node *node = stack->next;

	stack->next = node->next;
	return node;
}

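/* a single unit of work queued to io-wq */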
struct io_wq_work {
	struct io_wq_work_node list;
	unsigned flags;
};

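/* next work item linked after @work, or NULL if it is the last one */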
static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
{
	if (!work->list.next)
		return NULL;

	return container_of(work->list.next, struct io_wq_work, list);
}

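/* callbacks supplied by the io_wq user: one executes a work item, one frees it and may hand back follow-up work */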
typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
typedef void (io_wq_work_fn)(struct io_wq_work *);

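/* refcounted state backing hashed-work serialisation */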
struct io_wq_hash {
	refcount_t refs;
	unsigned long map;
	struct wait_queue_head wait;
};

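/* drop a reference, freeing the hash on the final put */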
static inline void io_wq_put_hash(struct io_wq_hash *hash)
{
	if (refcount_dec_and_test(&hash->refs))
		kfree(hash);
}

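/* parameters supplied to io_wq_create() */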
struct io_wq_data {
	struct io_wq_hash *hash;
	struct task_struct *task;
	io_wq_work_fn *do_work;
	free_work_fn *free_work;
};

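/* create and tear down an io-wq instance */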
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
void io_wq_exit_start(struct io_wq *wq);
void io_wq_put_and_exit(struct io_wq *wq);

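/* queue work for execution; io_wq_hash_work() tags @work with a hash of @val so identically hashed items run serially */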
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_hash_work(struct io_wq_work *work, void *val);

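/* runtime tuning of worker CPU affinity and maximum worker counts */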
int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
int io_wq_max_workers(struct io_wq *wq, int *new_count);

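/* true if @work was marked hashed via io_wq_hash_work() */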
static inline bool io_wq_is_hashed(struct io_wq_work *work)
{
	return work->flags & IO_WQ_WORK_HASHED;
}

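/* cancel pending or running work for which @cancel returns true; @cancel_all keeps going past the first match */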
typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data, bool cancel_all);

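/* hooks called from the scheduler when an io-wq worker blocks or resumes running */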
#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif

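/* true if the calling task is an io-wq worker thread */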
static inline bool io_wq_current_is_worker(void)
{
	return in_task() && (current->flags & PF_IO_WORKER) &&
		current->worker_private;
}
#endif