forked from Minki/linux
workqueue: use rcu_read_lock_sched() instead for accessing pwq in RCU
rcu_read_lock_sched() is better than preempt_disable() if the code is protected by RCU_SCHED. Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com> Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:
parent
951a078a52
commit
881094532e
@@ -3962,7 +3962,7 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
|
||||
struct pool_workqueue *pwq;
|
||||
bool ret;
|
||||
|
||||
-	preempt_disable();
+	rcu_read_lock_sched();
|
||||
|
||||
if (!(wq->flags & WQ_UNBOUND))
|
||||
pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
|
||||
@@ -3970,7 +3970,7 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
|
||||
pwq = first_pwq(wq);
|
||||
|
||||
ret = !list_empty(&pwq->delayed_works);
|
||||
-	preempt_enable();
+	rcu_read_unlock_sched();
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -4354,16 +4354,16 @@ bool freeze_workqueues_busy(void)
|
||||
* nr_active is monotonically decreasing. It's safe
|
||||
* to peek without lock.
|
||||
*/
|
||||
-		preempt_disable();
+		rcu_read_lock_sched();
|
||||
for_each_pwq(pwq, wq) {
|
||||
WARN_ON_ONCE(pwq->nr_active < 0);
|
||||
if (pwq->nr_active) {
|
||||
busy = true;
|
||||
-				preempt_enable();
+				rcu_read_unlock_sched();
|
||||
goto out_unlock;
|
||||
}
|
||||
}
|
||||
-		preempt_enable();
+		rcu_read_unlock_sched();
|
||||
}
|
||||
out_unlock:
|
||||
mutex_unlock(&wq_mutex);
|
||||
|
Loading…
Reference in New Issue
Block a user