workqueue: Move the code of waking a worker up in unbind_workers()
In unbind_workers(), there are two pool->lock held sections separated by the code that zaps nr_running. wake_up_worker() needs to run inside a pool->lock held section and after nr_running has been zapped, and zapping nr_running had to happen after schedule() while the local wake-up functionality was in use. Now that the call to schedule() has been removed along with the local wake-up functionality, the code can be merged into a single pool->lock held section.

The diffstat makes it look as though other code was moved down, because the diff tools cannot recognize that the two lock sections are merged by swapping two code blocks.

Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
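To make the reordering easier to follow, here is a condensed before/after sketch of the tail of unbind_workers(). This is a simplified outline distilled from the diff below, not the verbatim kernel source; the worker-affinity rebind loop is abbreviated to a comment:

	/* Before: two pool->lock held sections, nr_running zapped in between. */
	raw_spin_unlock_irq(&pool->lock);	/* end of first locked section */
	/* ... rebind each worker's CPU affinity, drop wq_pool_attach_mutex ... */
	atomic_set(&pool->nr_running, 0);	/* zap nr_running, lock not held */
	raw_spin_lock_irq(&pool->lock);		/* reacquire just for the wakeup */
	wake_up_worker(pool);
	raw_spin_unlock_irq(&pool->lock);

	/* After: one pool->lock held section covers both the zap and the wakeup. */
	atomic_set(&pool->nr_running, 0);
	wake_up_worker(pool);
	raw_spin_unlock_irq(&pool->lock);	/* single unlock at the end */
	/* ... rebind each worker's CPU affinity, drop wq_pool_attach_mutex ... */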
parent b4ac9384ac
commit 989442d737
@@ -1810,14 +1810,8 @@ static void worker_enter_idle(struct worker *worker)
 	if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
 		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
 
-	/*
-	 * Sanity check nr_running. Because unbind_workers() releases
-	 * pool->lock between setting %WORKER_UNBOUND and zapping
-	 * nr_running, the warning may trigger spuriously.  Check iff
-	 * unbind is not in progress.
-	 */
-	WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
-		     pool->nr_workers == pool->nr_idle &&
+	/* Sanity check nr_running. */
+	WARN_ON_ONCE(pool->nr_workers == pool->nr_idle &&
 		     atomic_read(&pool->nr_running));
 }
 
@@ -4988,21 +4982,12 @@ static void unbind_workers(int cpu)
 
 		pool->flags |= POOL_DISASSOCIATED;
 
-		raw_spin_unlock_irq(&pool->lock);
-
-		for_each_pool_worker(worker, pool) {
-			kthread_set_per_cpu(worker->task, -1);
-			WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
-		}
-
-		mutex_unlock(&wq_pool_attach_mutex);
-
 		/*
-		 * Sched callbacks are disabled now.  Zap nr_running.
-		 * After this, nr_running stays zero and need_more_worker()
-		 * and keep_working() are always true as long as the
-		 * worklist is not empty.  This pool now behaves as an
-		 * unbound (in terms of concurrency management) pool which
+		 * The handling of nr_running in sched callbacks are disabled
+		 * now.  Zap nr_running.  After this, nr_running stays zero and
+		 * need_more_worker() and keep_working() are always true as
+		 * long as the worklist is not empty.  This pool now behaves as
+		 * an unbound (in terms of concurrency management) pool which
 		 * are served by workers tied to the pool.
 		 */
 		atomic_set(&pool->nr_running, 0);
@@ -5012,9 +4997,16 @@ static void unbind_workers(int cpu)
 		 * worker blocking could lead to lengthy stalls.  Kick off
 		 * unbound chain execution of currently pending work items.
 		 */
-		raw_spin_lock_irq(&pool->lock);
 		wake_up_worker(pool);
+
 		raw_spin_unlock_irq(&pool->lock);
+
+		for_each_pool_worker(worker, pool) {
+			kthread_set_per_cpu(worker->task, -1);
+			WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
+		}
+
+		mutex_unlock(&wq_pool_attach_mutex);
 	}
 }
 
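Read together, the two unbind_workers() hunks leave the loop body ending roughly as follows. This is assembled from the context and added lines above, with the long comments shortened and the earlier part of the function elided:

		pool->flags |= POOL_DISASSOCIATED;

		/* nr_running handling in sched callbacks is disabled; zap it. */
		atomic_set(&pool->nr_running, 0);

		/* Kick off unbound execution of currently pending work items. */
		wake_up_worker(pool);

		raw_spin_unlock_irq(&pool->lock);

		for_each_pool_worker(worker, pool) {
			kthread_set_per_cpu(worker->task, -1);
			WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
		}

		mutex_unlock(&wq_pool_attach_mutex);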