@@ -375,6 +375,7 @@ EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
 static int worker_thread(void *__worker);
 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
 static void show_pwq(struct pool_workqueue *pwq);
+static void show_one_worker_pool(struct worker_pool *pool);
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
@@ -4447,7 +4448,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 			raw_spin_unlock_irq(&pwq->pool->lock);
 			mutex_unlock(&wq->mutex);
 			mutex_unlock(&wq_pool_mutex);
-			show_workqueue_state();
+			show_one_workqueue(wq);
 			return;
 		}
 		raw_spin_unlock_irq(&pwq->pool->lock);
@@ -4797,96 +4798,115 @@ static void show_pwq(struct pool_workqueue *pwq)
 }
 
 /**
- * show_workqueue_state - dump workqueue state
+ * show_one_workqueue - dump state of specified workqueue
+ * @wq: workqueue whose state will be printed
+ */
+void show_one_workqueue(struct workqueue_struct *wq)
+{
+	struct pool_workqueue *pwq;
+	bool idle = true;
+	unsigned long flags;
+
+	for_each_pwq(pwq, wq) {
+		if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
+			idle = false;
+			break;
+		}
+	}
+	if (idle) /* Nothing to print for idle workqueue */
+		return;
+
+	pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
+
+	for_each_pwq(pwq, wq) {
+		raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+		if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
+			/*
+			 * Defer printing to avoid deadlocks in console
+			 * drivers that queue work while holding locks
+			 * also taken in their write paths.
+			 */
+			printk_deferred_enter();
+			show_pwq(pwq);
+			printk_deferred_exit();
+		}
+		raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+		/*
+		 * We could be printing a lot from atomic context, e.g.
+		 * sysrq-t -> show_all_workqueues(). Avoid triggering
+		 * hard lockup.
+		 */
+		touch_nmi_watchdog();
+	}
+
+}
+
+/**
+ * show_one_worker_pool - dump state of specified worker pool
+ * @pool: worker pool whose state will be printed
+ */
+static void show_one_worker_pool(struct worker_pool *pool)
+{
+	struct worker *worker;
+	bool first = true;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&pool->lock, flags);
+	if (pool->nr_workers == pool->nr_idle)
+		goto next_pool;
+	/*
+	 * Defer printing to avoid deadlocks in console drivers that
+	 * queue work while holding locks also taken in their write
+	 * paths.
+	 */
+	printk_deferred_enter();
+	pr_info("pool %d:", pool->id);
+	pr_cont_pool_info(pool);
+	pr_cont(" hung=%us workers=%d",
+		jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
+		pool->nr_workers);
+	if (pool->manager)
+		pr_cont(" manager: %d",
+			task_pid_nr(pool->manager->task));
+	list_for_each_entry(worker, &pool->idle_list, entry) {
+		pr_cont(" %s%d", first ? "idle: " : "",
+			task_pid_nr(worker->task));
+		first = false;
+	}
+	pr_cont("\n");
+	printk_deferred_exit();
+next_pool:
+	raw_spin_unlock_irqrestore(&pool->lock, flags);
+	/*
+	 * We could be printing a lot from atomic context, e.g.
+	 * sysrq-t -> show_all_workqueues(). Avoid triggering
+	 * hard lockup.
+	 */
+	touch_nmi_watchdog();
+
+}
+
+/**
+ * show_all_workqueues - dump workqueue state
  *
  * Called from a sysrq handler or try_to_freeze_tasks() and prints out
  * all busy workqueues and pools.
  */
-void show_workqueue_state(void)
+void show_all_workqueues(void)
 {
 	struct workqueue_struct *wq;
 	struct worker_pool *pool;
-	unsigned long flags;
 	int pi;
 
 	rcu_read_lock();
 
 	pr_info("Showing busy workqueues and worker pools:\n");
 
-	list_for_each_entry_rcu(wq, &workqueues, list) {
-		struct pool_workqueue *pwq;
-		bool idle = true;
+	list_for_each_entry_rcu(wq, &workqueues, list)
+		show_one_workqueue(wq);
 
-		for_each_pwq(pwq, wq) {
-			if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
-				idle = false;
-				break;
-			}
-		}
-		if (idle)
-			continue;
-
-		pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
-
-		for_each_pwq(pwq, wq) {
-			raw_spin_lock_irqsave(&pwq->pool->lock, flags);
-			if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
-				/*
-				 * Defer printing to avoid deadlocks in console
-				 * drivers that queue work while holding locks
-				 * also taken in their write paths.
-				 */
-				printk_deferred_enter();
-				show_pwq(pwq);
-				printk_deferred_exit();
-			}
-			raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
-			/*
-			 * We could be printing a lot from atomic context, e.g.
-			 * sysrq-t -> show_workqueue_state(). Avoid triggering
-			 * hard lockup.
-			 */
-			touch_nmi_watchdog();
-		}
-	}
-
-	for_each_pool(pool, pi) {
-		struct worker *worker;
-		bool first = true;
-
-		raw_spin_lock_irqsave(&pool->lock, flags);
-		if (pool->nr_workers == pool->nr_idle)
-			goto next_pool;
-		/*
-		 * Defer printing to avoid deadlocks in console drivers that
-		 * queue work while holding locks also taken in their write
-		 * paths.
-		 */
-		printk_deferred_enter();
-		pr_info("pool %d:", pool->id);
-		pr_cont_pool_info(pool);
-		pr_cont(" hung=%us workers=%d",
-			jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
-			pool->nr_workers);
-		if (pool->manager)
-			pr_cont(" manager: %d",
-				task_pid_nr(pool->manager->task));
-		list_for_each_entry(worker, &pool->idle_list, entry) {
-			pr_cont(" %s%d", first ? "idle: " : "",
-				task_pid_nr(worker->task));
-			first = false;
-		}
-		pr_cont("\n");
-		printk_deferred_exit();
-	next_pool:
-		raw_spin_unlock_irqrestore(&pool->lock, flags);
-		/*
-		 * We could be printing a lot from atomic context, e.g.
-		 * sysrq-t -> show_workqueue_state(). Avoid triggering
-		 * hard lockup.
-		 */
-		touch_nmi_watchdog();
-	}
+	for_each_pool(pool, pi)
+		show_one_worker_pool(pool);
 
 	rcu_read_unlock();
 }
@@ -5876,7 +5896,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
 	rcu_read_unlock();
 
 	if (lockup_detected)
-		show_workqueue_state();
+		show_all_workqueues();
 
 	wq_watchdog_reset_touched();
 	mod_timer(&wq_watchdog_timer, jiffies + thresh);