Mirror of https://github.com/torvalds/linux.git
synced 2024-12-29 14:21:47 +00:00
34b087e483
There's no in-kernel user of set_freezable_with_signal() left. Mixing TIF_SIGPENDING with kernel threads can lead to nasty corner cases as kernel threads never travel signal delivery path on their own.

e.g. the current implementation is buggy in the cancelation path of __thaw_task(). It calls recalc_sigpending_and_wake() in an attempt to clear TIF_SIGPENDING but the function never clears it regardless of sigpending state. This means that signallable freezable kthreads may continue executing with !freezing() && stuck TIF_SIGPENDING, which can be troublesome.

This patch removes set_freezable_with_signal() along with PF_FREEZER_NOSIG and recalc_sigpending*() calls in freezer. User tasks get TIF_SIGPENDING, kernel tasks get woken up and the spurious sigpending is dealt with in the usual signal delivery path.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Oleg Nesterov <oleg@redhat.com>
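For illustration, here is a minimal sketch (hypothetical code, not part of this commit) of what a freezable kernel thread looks like once set_freezable_with_signal() is gone: it marks itself freezable with set_freezable() and polls try_to_freeze() in its main loop, with no signal machinery involved.

#include <linux/freezer.h>
#include <linux/kthread.h>

/* Hypothetical freezable kthread main loop using only the remaining API. */
static int example_worker(void *data)
{
        set_freezable();        /* clear PF_NOFREEZE for this kthread */

        while (!kthread_should_stop()) {
                /* enters __refrigerator() when a freezing condition is set */
                try_to_freeze();

                /* ... do one unit of work ... */
                schedule_timeout_interruptible(HZ);
        }
        return 0;
}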
172 lines
4.2 KiB
C
/*
 * kernel/freezer.c - Function to freeze a process
 *
 * Originally from kernel/power/process.c
 */

#include <linux/interrupt.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

/* total number of freezing conditions in effect */
atomic_t system_freezing_cnt = ATOMIC_INIT(0);
EXPORT_SYMBOL(system_freezing_cnt);

/* indicate whether PM freezing is in effect, protected by pm_mutex */
bool pm_freezing;
bool pm_nosig_freezing;

/* protects freezing and frozen transitions */
static DEFINE_SPINLOCK(freezer_lock);

/**
 * freezing_slow_path - slow path for testing whether a task needs to be frozen
 * @p: task to be tested
 *
 * This function is called by freezing() if system_freezing_cnt isn't zero
 * and tests whether @p needs to enter and stay in frozen state. Can be
 * called under any context. The freezers are responsible for ensuring the
 * target tasks see the updated state.
 */
bool freezing_slow_path(struct task_struct *p)
{
        if (p->flags & PF_NOFREEZE)
                return false;

        if (pm_nosig_freezing || cgroup_freezing(p))
                return true;

        if (pm_freezing && !(p->flags & PF_KTHREAD))
                return true;

        return false;
}
EXPORT_SYMBOL(freezing_slow_path);

/* Refrigerator is the place where frozen processes are stored :-). */
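/*
 * Tasks normally end up here via try_to_freeze() (include/linux/freezer.h),
 * which checks freezing(current) before calling __refrigerator(false).
 */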
bool __refrigerator(bool check_kthr_stop)
{
        /* Hmm, should we be allowed to suspend when there are realtime
           processes around? */
        bool was_frozen = false;
        long save = current->state;

        pr_debug("%s entered refrigerator\n", current->comm);

        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);

                spin_lock_irq(&freezer_lock);
                current->flags |= PF_FROZEN;
                if (!freezing(current) ||
                    (check_kthr_stop && kthread_should_stop()))
                        current->flags &= ~PF_FROZEN;
                spin_unlock_irq(&freezer_lock);

                if (!(current->flags & PF_FROZEN))
                        break;
                was_frozen = true;
                schedule();
        }

        pr_debug("%s left refrigerator\n", current->comm);

        /*
         * Restore saved task state before returning. The mb'd version
         * needs to be used; otherwise, it might silently break
         * synchronization which depends on ordered task state change.
         */
        set_current_state(save);

        return was_frozen;
}
EXPORT_SYMBOL(__refrigerator);

static void fake_signal_wake_up(struct task_struct *p)
{
        unsigned long flags;

        if (lock_task_sighand(p, &flags)) {
                signal_wake_up(p, 0);
                unlock_task_sighand(p, &flags);
        }
}

/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 *
 * If @p is freezing, the freeze request is sent either by sending a fake
 * signal to it (if it is not a kernel thread) or by waking it up (if it
 * is a kernel thread).
 *
 * RETURNS:
 * %false, if @p is not freezing or already frozen; %true, otherwise
 */
bool freeze_task(struct task_struct *p)
{
        unsigned long flags;

        spin_lock_irqsave(&freezer_lock, flags);
        if (!freezing(p) || frozen(p)) {
                spin_unlock_irqrestore(&freezer_lock, flags);
                return false;
        }

        if (!(p->flags & PF_KTHREAD)) {
                fake_signal_wake_up(p);
                /*
                 * fake_signal_wake_up() goes through p's scheduler
                 * lock and guarantees that TASK_STOPPED/TRACED ->
                 * TASK_RUNNING transition can't race with task state
                 * testing in try_to_freeze_tasks().
                 */
        } else {
                wake_up_state(p, TASK_INTERRUPTIBLE);
        }

        spin_unlock_irqrestore(&freezer_lock, flags);
        return true;
}

void __thaw_task(struct task_struct *p)
{
        unsigned long flags;

        /*
         * Clear freezing and kick @p if FROZEN. Clearing is guaranteed to
         * be visible to @p as waking up implies wmb. Waking up inside
         * freezer_lock also prevents wakeups from leaking outside
         * refrigerator.
         */
        spin_lock_irqsave(&freezer_lock, flags);
        if (frozen(p))
                wake_up_process(p);
        spin_unlock_irqrestore(&freezer_lock, flags);
}

/**
 * set_freezable - make %current freezable
 *
 * Mark %current freezable and enter refrigerator if necessary.
 */
bool set_freezable(void)
{
        might_sleep();

        /*
         * Modify flags while holding freezer_lock. This ensures the
         * freezer notices that we aren't frozen yet or the freezing
         * condition is visible to try_to_freeze() below.
         */
        spin_lock_irq(&freezer_lock);
        current->flags &= ~PF_NOFREEZE;
        spin_unlock_irq(&freezer_lock);

        return try_to_freeze();
}
EXPORT_SYMBOL(set_freezable);