[PATCH] run_posix_cpu_timers: remove a bogus BUG_ON()
do_exit() clears ->it_##clock##_expires, but nothing prevents another CPU from attaching a timer to the exiting process after that. arm_timer() tries to protect against this race, but its check is itself racy.

After exit_notify() does 'write_unlock_irq(&tasklist_lock)' and before do_exit() calls 'schedule()', a local timer interrupt can find tsk->exit_state != 0. If that state is EXIT_DEAD (or another CPU does sys_wait4), the interrupted task has ->signal == NULL.

At this point the exiting task has no pending cpu timers; they were cleaned up in __exit_signal()->posix_cpu_timers_exit{,_group}(), so we can just return from the irq.

John Stultz recently confirmed this bug, see
http://marc.theaimsgroup.com/?l=linux-kernel&m=115015841413687

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 30f1e3dd8c (parent 8f17fc20bf)
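A sketch of the race window, reconstructed from the changelog above (the interleaving is illustrative, not runnable code):

	exiting task, process context          local timer irq on the same CPU
	-----------------------------          -------------------------------
	do_exit()
	  exit_notify()
	    /* sets tsk->exit_state */
	    write_unlock_irq(&tasklist_lock)
	                                       run_posix_cpu_timers(tsk)
	                                         tsk->exit_state != 0 here; if it
	                                         is EXIT_DEAD (or another CPU has
	                                         reaped the task via sys_wait4),
	                                         tsk->signal == NULL
	                                         old code: BUG_ON(tsk->exit_state)
	                                           fires on this harmless window
	                                         new code: the tsk->signal == NULL
	                                           check makes the irq just return
	  schedule()  /* never returns */

The window is harmless because __exit_signal()->posix_cpu_timers_exit{,_group}() has already removed any pending cpu timers by that point.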
kernel/exit.c
@@ -881,14 +881,6 @@ fastcall NORET_TYPE void do_exit(long code)
 
 	tsk->flags |= PF_EXITING;
 
-	/*
-	 * Make sure we don't try to process any timer firings
-	 * while we are already exiting.
-	 */
-	tsk->it_virt_expires = cputime_zero;
-	tsk->it_prof_expires = cputime_zero;
-	tsk->it_sched_expires = 0;
-
 	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
 				current->comm, current->pid,
kernel/posix-cpu-timers.c
@@ -1288,30 +1288,30 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 
 #undef	UNEXPIRED
 
-	BUG_ON(tsk->exit_state);
-
 	/*
 	 * Double-check with locks held.
 	 */
 	read_lock(&tasklist_lock);
-	spin_lock(&tsk->sighand->siglock);
+	if (likely(tsk->signal != NULL)) {
+		spin_lock(&tsk->sighand->siglock);
 
-	/*
-	 * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
-	 * all the timers that are firing, and put them on the firing list.
-	 */
-	check_thread_timers(tsk, &firing);
-	check_process_timers(tsk, &firing);
+		/*
+		 * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
+		 * all the timers that are firing, and put them on the firing list.
+		 */
+		check_thread_timers(tsk, &firing);
+		check_process_timers(tsk, &firing);
 
-	/*
-	 * We must release these locks before taking any timer's lock.
-	 * There is a potential race with timer deletion here, as the
-	 * siglock now protects our private firing list. We have set
-	 * the firing flag in each timer, so that a deletion attempt
-	 * that gets the timer lock before we do will give it up and
-	 * spin until we've taken care of that timer below.
-	 */
-	spin_unlock(&tsk->sighand->siglock);
+		/*
+		 * We must release these locks before taking any timer's lock.
+		 * There is a potential race with timer deletion here, as the
+		 * siglock now protects our private firing list. We have set
+		 * the firing flag in each timer, so that a deletion attempt
+		 * that gets the timer lock before we do will give it up and
+		 * spin until we've taken care of that timer below.
+		 */
+		spin_unlock(&tsk->sighand->siglock);
+	}
 	read_unlock(&tasklist_lock);
 
 	/*
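For reference, here is how the locked section of run_posix_cpu_timers() reads with the patch applied, reassembled from the new side of the hunk above (the original block comments are condensed into short one-line comments):

	read_lock(&tasklist_lock);
	if (likely(tsk->signal != NULL)) {
		spin_lock(&tsk->sighand->siglock);

		/* move every firing timer onto the local firing list */
		check_thread_timers(tsk, &firing);
		check_process_timers(tsk, &firing);

		/* drop siglock before taking any individual timer's lock */
		spin_unlock(&tsk->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

When tsk->signal is already NULL, the function falls straight through to read_unlock() with an empty firing list, which is exactly the "just return from the irq" behaviour the changelog describes.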