/*
 * drivers/power/process.c - Functions for starting/stopping processes on 
 * suspend transitions.
 *
 * Originally from swsusp.
 */


#undef DEBUG
#include <linux/interrupt.h>
#include <linux/oom.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/kmod.h>
#include <trace/events/power.h>

|
/*
|
|
|
|
* Timeout for stopping processes
|
|
|
|
*/
|
2013-02-01 08:56:03 +00:00
|
|
|
unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-11-21 20:32:26 +00:00
|
|
|
static int try_to_freeze_tasks(bool user_only)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
struct task_struct *g, *p;
|
2006-12-07 04:34:40 +00:00
|
|
|
unsigned long end_time;
|
|
|
|
unsigned int todo;
|
2010-06-29 08:07:12 +00:00
|
|
|
bool wq_busy = false;
|
2007-10-18 10:04:49 +00:00
|
|
|
struct timeval start, end;
|
2013-05-06 23:50:10 +00:00
|
|
|
u64 elapsed_msecs64;
|
|
|
|
unsigned int elapsed_msecs;
|
2010-10-04 20:07:32 +00:00
|
|
|
bool wakeup = false;
|
2013-05-06 23:50:10 +00:00
|
|
|
int sleep_usecs = USEC_PER_MSEC;
|
2007-10-18 10:04:49 +00:00
|
|
|
|
|
|
|
do_gettimeofday(&start);
|
2005-06-25 06:13:50 +00:00
|
|
|
|
2013-02-01 08:56:03 +00:00
|
|
|
end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);
|
2010-06-29 08:07:12 +00:00
|
|
|
|
2011-11-21 20:32:26 +00:00
|
|
|
if (!user_only)
|
2010-06-29 08:07:12 +00:00
|
|
|
freeze_workqueues_begin();
|
|
|
|
|
2009-10-08 20:47:30 +00:00
|
|
|
while (true) {
|
2006-12-07 04:34:40 +00:00
|
|
|
todo = 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
read_lock(&tasklist_lock);
|
2014-10-21 07:27:15 +00:00
|
|
|
for_each_process_thread(g, p) {
|
2011-11-21 20:32:26 +00:00
|
|
|
if (p == current || !freeze_task(p))
|
2007-10-18 10:04:46 +00:00
|
|
|
continue;
|
|
|
|
|
2012-10-26 17:46:06 +00:00
|
|
|
if (!freezer_should_skip(p))
|
2007-05-23 20:57:25 +00:00
|
|
|
todo++;
|
2014-10-21 07:27:15 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
read_unlock(&tasklist_lock);
|
2010-06-29 08:07:12 +00:00
|
|
|
|
2011-11-21 20:32:26 +00:00
|
|
|
if (!user_only) {
|
2010-06-29 08:07:12 +00:00
|
|
|
wq_busy = freeze_workqueues_busy();
|
|
|
|
todo += wq_busy;
|
|
|
|
}
|
|
|
|
|
2009-10-08 20:47:30 +00:00
|
|
|
if (!todo || time_after(jiffies, end_time))
|
2005-09-03 22:57:05 +00:00
|
|
|
break;
|
2009-10-08 20:47:30 +00:00
|
|
|
|
2010-12-03 21:58:31 +00:00
|
|
|
if (pm_wakeup_pending()) {
|
2010-10-04 20:07:32 +00:00
|
|
|
wakeup = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2009-10-08 20:47:30 +00:00
|
|
|
/*
|
|
|
|
* We need to retry, but first give the freezing tasks some
|
2013-05-06 23:50:10 +00:00
|
|
|
* time to enter the refrigerator. Start with an initial
|
|
|
|
* 1 ms sleep followed by exponential backoff until 8 ms.
|
2009-10-08 20:47:30 +00:00
|
|
|
*/
|
2013-05-06 23:50:10 +00:00
|
|
|
usleep_range(sleep_usecs / 2, sleep_usecs);
|
|
|
|
if (sleep_usecs < 8 * USEC_PER_MSEC)
|
|
|
|
sleep_usecs *= 2;
|
2009-10-08 20:47:30 +00:00
|
|
|
}
|
2005-06-25 06:13:50 +00:00
|
|
|
|
2007-10-18 10:04:49 +00:00
|
|
|
do_gettimeofday(&end);
|
2013-05-06 23:50:10 +00:00
|
|
|
elapsed_msecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
|
|
|
|
do_div(elapsed_msecs64, NSEC_PER_MSEC);
|
|
|
|
elapsed_msecs = elapsed_msecs64;
|
2007-10-18 10:04:49 +00:00
|
|
|
|
2005-09-03 22:57:05 +00:00
|
|
|
if (todo) {
|
2006-12-07 04:34:26 +00:00
|
|
|
printk("\n");
|
2013-05-06 23:50:10 +00:00
|
|
|
printk(KERN_ERR "Freezing of tasks %s after %d.%03d seconds "
|
2010-06-29 08:07:12 +00:00
|
|
|
"(%d tasks refusing to freeze, wq_busy=%d):\n",
|
2010-10-04 20:07:32 +00:00
|
|
|
wakeup ? "aborted" : "failed",
|
2013-05-06 23:50:10 +00:00
|
|
|
elapsed_msecs / 1000, elapsed_msecs % 1000,
|
2010-06-29 08:07:12 +00:00
|
|
|
todo - wq_busy, wq_busy);
|
|
|
|
|
2012-02-10 23:00:34 +00:00
|
|
|
if (!wakeup) {
|
|
|
|
read_lock(&tasklist_lock);
|
2014-10-21 07:27:15 +00:00
|
|
|
for_each_process_thread(g, p) {
|
2012-02-10 23:00:34 +00:00
|
|
|
if (p != current && !freezer_should_skip(p)
|
|
|
|
&& freezing(p) && !frozen(p))
|
|
|
|
sched_show_task(p);
|
2014-10-21 07:27:15 +00:00
|
|
|
}
|
2012-02-10 23:00:34 +00:00
|
|
|
read_unlock(&tasklist_lock);
|
|
|
|
}
|
2007-10-18 10:04:49 +00:00
|
|
|
} else {
|
2013-05-06 23:50:10 +00:00
|
|
|
printk("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
|
|
|
|
elapsed_msecs % 1000);
|
2005-09-03 22:57:05 +00:00
|
|
|
}
|
|
|
|
|
2007-07-19 08:47:34 +00:00
|
|
|
return todo ? -EBUSY : 0;
|
2006-12-07 04:34:40 +00:00
|
|
|
}
|
|
|
|
|
2014-10-22 20:47:32 +00:00
|
|
|
static bool __check_frozen_processes(void)
|
|
|
|
{
|
|
|
|
struct task_struct *g, *p;
|
|
|
|
|
|
|
|
for_each_process_thread(g, p)
|
|
|
|
if (p != current && !freezer_should_skip(p) && !frozen(p))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2014-10-20 16:12:32 +00:00
|
|
|
/*
|
|
|
|
* Returns true if all freezable tasks (except for current) are frozen already
|
|
|
|
*/
|
|
|
|
static bool check_frozen_processes(void)
|
|
|
|
{
|
2014-10-22 20:47:32 +00:00
|
|
|
bool ret;
|
2014-10-20 16:12:32 +00:00
|
|
|
|
|
|
|
read_lock(&tasklist_lock);
|
2014-10-22 20:47:32 +00:00
|
|
|
ret = __check_frozen_processes();
|
2014-10-20 16:12:32 +00:00
|
|
|
read_unlock(&tasklist_lock);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2006-12-07 04:34:40 +00:00
|
|
|
/**
|
2011-09-26 18:32:27 +00:00
|
|
|
* freeze_processes - Signal user space processes to enter the refrigerator.
|
2013-07-25 00:41:33 +00:00
|
|
|
* The current thread will not be frozen. The same process that calls
|
|
|
|
* freeze_processes must later call thaw_processes.
|
2011-11-21 20:32:24 +00:00
|
|
|
*
|
|
|
|
* On success, returns 0. On failure, -errno and system is fully thawed.
|
2006-12-07 04:34:40 +00:00
|
|
|
*/
|
|
|
|
int freeze_processes(void)
|
|
|
|
{
|
2007-07-19 08:47:34 +00:00
|
|
|
int error;
|
2014-10-20 16:12:32 +00:00
|
|
|
int oom_kills_saved;
|
2006-12-07 04:34:40 +00:00
|
|
|
|
PM / Sleep: Mitigate race between the freezer and request_firmware()
There is a race condition between the freezer and request_firmware()
such that if request_firmware() is run on one CPU and
freeze_processes() is run on another CPU and usermodehelper_disable()
called by it succeeds to grab umhelper_sem for writing before
usermodehelper_read_trylock() called from request_firmware()
acquires it for reading, the request_firmware() will fail and
trigger a WARN_ON() complaining that it was called at a wrong time.
However, in fact, it wasn't called at a wrong time and
freeze_processes() simply happened to be executed simultaneously.
To avoid this race, at least in some cases, modify
usermodehelper_read_trylock() so that it doesn't fail if the
freezing of tasks has just started and hasn't been completed yet.
Instead, during the freezing of tasks, it will try to freeze the
task that has called it so that it can wait until user space is
thawed without triggering the scary warning.
For this purpose, change usermodehelper_disabled so that it can
take three different values, UMH_ENABLED (0), UMH_FREEZING and
UMH_DISABLED. The first one means that usermode helpers are
enabled, the last one means "hard disable" (i.e. the system is not
ready for usermode helpers to be used) and the second one
is reserved for the freezer. Namely, when freeze_processes() is
started, it sets usermodehelper_disabled to UMH_FREEZING which
tells usermodehelper_read_trylock() that it shouldn't fail just
yet and should call try_to_freeze() if woken up and cannot
return immediately. This way all freezable tasks that happen
to call request_firmware() right before freeze_processes() is
started and lose the race for umhelper_sem with it will be
frozen and will sleep until thaw_processes() unsets
usermodehelper_disabled. [For the non-freezable callers of
request_firmware() the race for umhelper_sem against
freeze_processes() is unfortunately unavoidable.]
Reported-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: stable@vger.kernel.org
2012-03-28 21:30:28 +00:00
|
|
|
error = __usermodehelper_disable(UMH_FREEZING);
|
2012-03-28 21:30:21 +00:00
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
2013-07-25 00:41:33 +00:00
|
|
|
/* Make sure this task doesn't get frozen */
|
|
|
|
current->flags |= PF_SUSPEND_TASK;
|
|
|
|
|
2011-11-21 20:32:25 +00:00
|
|
|
if (!pm_freezing)
|
|
|
|
atomic_inc(&system_freezing_cnt);
|
|
|
|
|
2014-09-01 11:47:49 +00:00
|
|
|
pm_wakeup_clear();
|
2007-10-18 10:04:48 +00:00
|
|
|
printk("Freezing user space processes ... ");
|
2011-11-21 20:32:25 +00:00
|
|
|
pm_freezing = true;
|
2014-10-20 16:12:32 +00:00
|
|
|
oom_kills_saved = oom_kills_count();
|
2008-06-11 20:04:29 +00:00
|
|
|
error = try_to_freeze_tasks(true);
|
2011-09-26 18:32:27 +00:00
|
|
|
if (!error) {
|
PM / Sleep: Mitigate race between the freezer and request_firmware()
There is a race condition between the freezer and request_firmware()
such that if request_firmware() is run on one CPU and
freeze_processes() is run on another CPU and usermodehelper_disable()
called by it succeeds to grab umhelper_sem for writing before
usermodehelper_read_trylock() called from request_firmware()
acquires it for reading, the request_firmware() will fail and
trigger a WARN_ON() complaining that it was called at a wrong time.
However, in fact, it wasn't called at a wrong time and
freeze_processes() simply happened to be executed simultaneously.
To avoid this race, at least in some cases, modify
usermodehelper_read_trylock() so that it doesn't fail if the
freezing of tasks has just started and hasn't been completed yet.
Instead, during the freezing of tasks, it will try to freeze the
task that has called it so that it can wait until user space is
thawed without triggering the scary warning.
For this purpose, change usermodehelper_disabled so that it can
take three different values, UMH_ENABLED (0), UMH_FREEZING and
UMH_DISABLED. The first one means that usermode helpers are
enabled, the last one means "hard disable" (i.e. the system is not
ready for usermode helpers to be used) and the second one
is reserved for the freezer. Namely, when freeze_processes() is
started, it sets usermodehelper_disabled to UMH_FREEZING which
tells usermodehelper_read_trylock() that it shouldn't fail just
yet and should call try_to_freeze() if woken up and cannot
return immediately. This way all freezable tasks that happen
to call request_firmware() right before freeze_processes() is
started and lose the race for umhelper_sem with it will be
frozen and will sleep until thaw_processes() unsets
usermodehelper_disabled. [For the non-freezable callers of
request_firmware() the race for umhelper_sem against
freeze_processes() is unfortunately unavoidable.]
Reported-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: stable@vger.kernel.org
2012-03-28 21:30:28 +00:00
|
|
|
__usermodehelper_set_disable_depth(UMH_DISABLED);
|
2011-09-26 18:32:27 +00:00
|
|
|
oom_killer_disable();
|
2014-10-20 16:12:32 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* There might have been an OOM kill while we were
|
|
|
|
* freezing tasks and the killed task might be still
|
|
|
|
* on the way out so we have to double check for race.
|
|
|
|
*/
|
|
|
|
if (oom_kills_count() != oom_kills_saved &&
|
2014-10-22 20:47:32 +00:00
|
|
|
!check_frozen_processes()) {
|
2014-10-20 16:12:32 +00:00
|
|
|
__usermodehelper_set_disable_depth(UMH_ENABLED);
|
|
|
|
printk("OOM in progress.");
|
|
|
|
error = -EBUSY;
|
2014-10-22 20:47:32 +00:00
|
|
|
} else {
|
|
|
|
printk("done.");
|
2014-10-20 16:12:32 +00:00
|
|
|
}
|
2011-09-26 18:32:27 +00:00
|
|
|
}
|
|
|
|
printk("\n");
|
|
|
|
BUG_ON(in_atomic());
|
|
|
|
|
2011-11-21 20:32:24 +00:00
|
|
|
if (error)
|
|
|
|
thaw_processes();
|
2011-09-26 18:32:27 +00:00
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
|
2011-11-21 20:32:24 +00:00
|
|
|
*
|
PM / Freezer: Thaw only kernel threads if freezing of kernel threads fails
If freezing of kernel threads fails, we are expected to automatically
thaw tasks in the error recovery path. However, at times, we encounter
situations in which we would like the automatic error recovery path
to thaw only the kernel threads, because we want to be able to do
some more cleanup before we thaw userspace. Something like:
error = freeze_kernel_threads();
if (error) {
/* Do some cleanup */
/* Only then thaw userspace tasks*/
thaw_processes();
}
An example of such a situation is where we freeze/thaw filesystems
during suspend/hibernation. There, if freezing of kernel threads
fails, we would like to thaw the frozen filesystems before thawing
the userspace tasks.
So, modify freeze_kernel_threads() to thaw only kernel threads in
case of freezing failure. And change suspend_freeze_processes()
accordingly. (At the same time, let us also get rid of the rather
cryptic usage of the conditional operator (:?) in that function.)
[rjw: In fact, this patch fixes a regression introduced during the
3.3 merge window, because without it thaw_processes() may be called
before swsusp_free() in some situations and that may lead to massive
memory allocation failures.]
Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Nigel Cunningham <nigel@tuxonice.net>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
2012-02-03 21:22:41 +00:00
|
|
|
* On success, returns 0. On failure, -errno and only the kernel threads are
|
|
|
|
* thawed, so as to give a chance to the caller to do additional cleanups
|
|
|
|
* (if any) before thawing the userspace tasks. So, it is the responsibility
|
|
|
|
* of the caller to thaw the userspace tasks, when the time is right.
|
2011-09-26 18:32:27 +00:00
|
|
|
*/
|
|
|
|
int freeze_kernel_threads(void)
|
|
|
|
{
|
|
|
|
int error;
|
2006-12-07 04:34:40 +00:00
|
|
|
|
2007-10-18 10:04:48 +00:00
|
|
|
printk("Freezing remaining freezable tasks ... ");
|
2011-11-21 20:32:25 +00:00
|
|
|
pm_nosig_freezing = true;
|
2008-06-11 20:04:29 +00:00
|
|
|
error = try_to_freeze_tasks(false);
|
2011-09-26 18:32:27 +00:00
|
|
|
if (!error)
|
|
|
|
printk("done.");
|
2009-06-16 22:32:41 +00:00
|
|
|
|
2007-10-18 10:04:48 +00:00
|
|
|
printk("\n");
|
2011-09-26 18:32:27 +00:00
|
|
|
BUG_ON(in_atomic());
|
2009-06-16 22:32:41 +00:00
|
|
|
|
2011-11-21 20:32:24 +00:00
|
|
|
if (error)
|
PM / Freezer: Thaw only kernel threads if freezing of kernel threads fails
If freezing of kernel threads fails, we are expected to automatically
thaw tasks in the error recovery path. However, at times, we encounter
situations in which we would like the automatic error recovery path
to thaw only the kernel threads, because we want to be able to do
some more cleanup before we thaw userspace. Something like:
error = freeze_kernel_threads();
if (error) {
/* Do some cleanup */
/* Only then thaw userspace tasks*/
thaw_processes();
}
An example of such a situation is where we freeze/thaw filesystems
during suspend/hibernation. There, if freezing of kernel threads
fails, we would like to thaw the frozen filesystems before thawing
the userspace tasks.
So, modify freeze_kernel_threads() to thaw only kernel threads in
case of freezing failure. And change suspend_freeze_processes()
accordingly. (At the same time, let us also get rid of the rather
cryptic usage of the conditional operator (:?) in that function.)
[rjw: In fact, this patch fixes a regression introduced during the
3.3 merge window, because without it thaw_processes() may be called
before swsusp_free() in some situations and that may lead to massive
memory allocation failures.]
Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Nigel Cunningham <nigel@tuxonice.net>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
2012-02-03 21:22:41 +00:00
|
|
|
thaw_kernel_threads();
|
2007-10-18 10:04:48 +00:00
|
|
|
return error;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2011-11-21 20:32:23 +00:00
|
|
|
void thaw_processes(void)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
struct task_struct *g, *p;
|
2013-07-25 00:41:33 +00:00
|
|
|
struct task_struct *curr = current;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2014-06-06 12:40:17 +00:00
|
|
|
trace_suspend_resume(TPS("thaw_processes"), 0, true);
|
2011-11-21 20:32:25 +00:00
|
|
|
if (pm_freezing)
|
|
|
|
atomic_dec(&system_freezing_cnt);
|
|
|
|
pm_freezing = false;
|
|
|
|
pm_nosig_freezing = false;
|
|
|
|
|
2011-11-21 20:32:23 +00:00
|
|
|
oom_killer_enable();
|
|
|
|
|
|
|
|
printk("Restarting tasks ... ");
|
|
|
|
|
2014-07-15 06:51:27 +00:00
|
|
|
__usermodehelper_set_disable_depth(UMH_FREEZING);
|
2011-11-21 20:32:23 +00:00
|
|
|
thaw_workqueues();
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
read_lock(&tasklist_lock);
|
2014-10-21 07:27:15 +00:00
|
|
|
for_each_process_thread(g, p) {
|
2013-07-25 00:41:33 +00:00
|
|
|
/* No other threads should have PF_SUSPEND_TASK set */
|
|
|
|
WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
|
2011-11-21 20:32:23 +00:00
|
|
|
__thaw_task(p);
|
2014-10-21 07:27:15 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
read_unlock(&tasklist_lock);
|
2009-06-16 22:32:41 +00:00
|
|
|
|
2013-07-25 00:41:33 +00:00
|
|
|
WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
|
|
|
|
curr->flags &= ~PF_SUSPEND_TASK;
|
|
|
|
|
2012-03-28 21:30:21 +00:00
|
|
|
usermodehelper_enable();
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
schedule();
|
2006-12-07 04:34:26 +00:00
|
|
|
printk("done.\n");
|
2014-06-06 12:40:17 +00:00
|
|
|
trace_suspend_resume(TPS("thaw_processes"), 0, false);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2012-01-29 19:35:52 +00:00
|
|
|
void thaw_kernel_threads(void)
|
|
|
|
{
|
|
|
|
struct task_struct *g, *p;
|
|
|
|
|
|
|
|
pm_nosig_freezing = false;
|
|
|
|
printk("Restarting kernel threads ... ");
|
|
|
|
|
|
|
|
thaw_workqueues();
|
|
|
|
|
|
|
|
read_lock(&tasklist_lock);
|
2014-10-21 07:27:15 +00:00
|
|
|
for_each_process_thread(g, p) {
|
2012-01-29 19:35:52 +00:00
|
|
|
if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
|
|
|
|
__thaw_task(p);
|
2014-10-21 07:27:15 +00:00
|
|
|
}
|
2012-01-29 19:35:52 +00:00
|
|
|
read_unlock(&tasklist_lock);
|
|
|
|
|
|
|
|
schedule();
|
|
|
|
printk("done.\n");
|
|
|
|
}
|