mirror of
https://github.com/torvalds/linux.git
synced 2024-11-14 16:12:02 +00:00
9da33de624
ed3e694d
"move exit_task_work() past exit_files() et.al" destroyed
the add/exit synchronization we had; the caller itself should ensure
that task_work_add() can't race with the exiting task.
However, this is not convenient/simple, and the only user which tries
to do this is buggy (see the next patch). Unless the task is current,
there is simply no way to do this in general.
Change exit_task_work()->task_work_run() to use the dummy "work_exited"
entry to let task_work_add() know it should fail.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20120826191211.GA4228@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
93 lines
2.2 KiB
C
93 lines
2.2 KiB
C
#include <linux/spinlock.h>
|
|
#include <linux/task_work.h>
|
|
#include <linux/tracehook.h>
|
|
|
|
/*
 * Sentinel installed on an exiting task's ->task_works once the list is
 * empty (see task_work_run()); task_work_add() checks for it and fails
 * with -ESRCH so no new work can be queued after exit.
 */
static struct callback_head work_exited;	/* all we need is ->next == NULL */
|
|
|
|
int
|
|
task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
|
|
{
|
|
struct callback_head *head;
|
|
|
|
do {
|
|
head = ACCESS_ONCE(task->task_works);
|
|
if (unlikely(head == &work_exited))
|
|
return -ESRCH;
|
|
work->next = head;
|
|
} while (cmpxchg(&task->task_works, head, work) != head);
|
|
|
|
if (notify)
|
|
set_notify_resume(task);
|
|
return 0;
|
|
}
|
|
|
|
/*
 * task_work_cancel - undo a previous task_work_add()
 * @task: the task the work was queued on
 * @func: callback to match; the first queued entry with this ->func
 *        is removed
 *
 * Returns the unlinked callback_head (ownership passes back to the
 * caller), or NULL if no matching entry was found.
 *
 * Runs with ->pi_lock held so task_work_run() can synchronize against
 * an in-flight cancel via raw_spin_unlock_wait().
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work = NULL;
	unsigned long flags;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added the
	 * new entry before this work, we will find it again. Or
	 * we raced with task_work_run(), *pprev == NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	while ((work = ACCESS_ONCE(*pprev))) {
		read_barrier_depends();
		if (work->func != func)
			pprev = &work->next;	/* not ours: walk past it */
		else if (cmpxchg(pprev, work, work->next) == work)
			break;			/* unlinked the match */
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}
|
|
|
|
/*
 * task_work_run - execute all works queued on the current task
 *
 * Repeatedly detaches the whole ->task_works list with cmpxchg() and
 * runs the callbacks in FIFO order, looping until the list stays empty
 * (callbacks may re-add work). When called from the exit path (the
 * task has PF_EXITING set), an empty list is replaced with the
 * work_exited sentinel so task_work_add() fails from then on.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		do {
			work = ACCESS_ONCE(task->task_works);
			head = !work && (task->flags & PF_EXITING) ?
				&work_exited : NULL;
		} while (cmpxchg(&task->task_works, work, head) != work);

		/* Nothing detached: list is empty (and sealed if exiting). */
		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(). It can't remove
		 * the first entry == work, cmpxchg(task_works) should
		 * fail, but it can play with *work and other entries.
		 */
		raw_spin_unlock_wait(&task->pi_lock);
		smp_mb();

		/* Reverse the list to run the works in fifo order */
		head = NULL;
		do {
			next = work->next;
			work->next = head;
			head = work;
			work = next;
		} while (work);

		/* Invoke each callback; it owns its callback_head now. */
		work = head;
		do {
			next = work->next;	/* read before func() may free it */
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}
|