Merge branch 'for-5.12-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull workqueue fixes from Tejun Heo:
 "Two workqueue fixes.

  One is around debugobj and poses no risk. The other is to prevent the
  stall watchdog from firing spuriously in certain conditions. Not as
  trivial as debugobj change but is still fairly low risk"

* 'for-5.12-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue/watchdog: Make unbound workqueues aware of touch_softlockup_watchdog()
  workqueue: Move the position of debug_work_activate() in __queue_work()
Linus Torvalds 2021-04-05 09:35:58 -07:00
commit 0a50438c84
2 changed files with 10 additions and 14 deletions
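
For context on the watchdog fix below: after this merge, wq_watchdog_timer_fn() compares a per-CPU pool against that CPU's touch timestamp and an unbound pool against the global one, while the touch paths (wq_watchdog_touch(), touch_all_softlockup_watchdogs()) keep both kinds of timestamp fresh. What follows is a minimal userspace sketch of that stall decision, not kernel code; pool_stalled() and its parameters are invented names, and time_after() here only mirrors the semantics of the kernel macro.

/*
 * Hypothetical stand-alone sketch of the post-fix stall check;
 * names and values are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

/* true if a is later than b, wrap-safe like the kernel's time_after() */
static bool time_after(unsigned long a, unsigned long b)
{
        return (long)(b - a) < 0;
}

static bool pool_stalled(unsigned long now, unsigned long thresh,
                         unsigned long pool_ts,        /* last work completion */
                         int pool_cpu,                 /* >= 0 for per-CPU pools */
                         unsigned long cpu_touched,    /* per-CPU watchdog touch */
                         unsigned long global_touched) /* global watchdog touch */
{
        /* per-CPU pools consult their own CPU's touch, unbound pools the global one */
        unsigned long touched = pool_cpu >= 0 ? cpu_touched : global_touched;
        /* take the later of the pool timestamp and the relevant touch */
        unsigned long ts = time_after(pool_ts, touched) ? pool_ts : touched;

        return time_after(now, ts + thresh);
}

int main(void)
{
        /* per-CPU pool whose CPU was touched recently: prints 0 (no stall) */
        printf("%d\n", pool_stalled(1000, 300, 500, 0, 900, 100));
        /* unbound pool with only stale timestamps: prints 1 (stall) */
        printf("%d\n", pool_stalled(1000, 300, 500, -1, 900, 100));
        return 0;
}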

kernel/watchdog.c

@@ -278,9 +278,10 @@ void touch_all_softlockup_watchdogs(void)
 	 * update as well, the only side effect might be a cycle delay for
 	 * the softlockup check.
 	 */
-	for_each_cpu(cpu, &watchdog_allowed_mask)
+	for_each_cpu(cpu, &watchdog_allowed_mask) {
 		per_cpu(watchdog_touch_ts, cpu) = SOFTLOCKUP_RESET;
-	wq_watchdog_touch(-1);
+		wq_watchdog_touch(cpu);
+	}
 }
 
 void touch_softlockup_watchdog_sync(void)

kernel/workqueue.c

@@ -1412,7 +1412,6 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 	 */
 	lockdep_assert_irqs_disabled();
 
-	debug_work_activate(work);
 
 	/* if draining, only works from the same workqueue are allowed */
 	if (unlikely(wq->flags & __WQ_DRAINING) &&
@@ -1494,6 +1493,7 @@ retry:
 		worklist = &pwq->delayed_works;
 	}
 
+	debug_work_activate(work);
 	insert_work(pwq, work, worklist, work_flags);
 
 out:
@@ -5787,22 +5787,17 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
 			continue;
 
 		/* get the latest of pool and touched timestamps */
+		if (pool->cpu >= 0)
+			touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
+		else
+			touched = READ_ONCE(wq_watchdog_touched);
 		pool_ts = READ_ONCE(pool->watchdog_ts);
-		touched = READ_ONCE(wq_watchdog_touched);
 
 		if (time_after(pool_ts, touched))
 			ts = pool_ts;
 		else
 			ts = touched;
 
-		if (pool->cpu >= 0) {
-			unsigned long cpu_touched =
-				READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
-						  pool->cpu));
-			if (time_after(cpu_touched, ts))
-				ts = cpu_touched;
-		}
-
 		/* did we stall? */
 		if (time_after(jiffies, ts + thresh)) {
 			lockup_detected = true;
@@ -5826,8 +5821,8 @@ notrace void wq_watchdog_touch(int cpu)
 {
 	if (cpu >= 0)
 		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
-	else
-		wq_watchdog_touched = jiffies;
+
+	wq_watchdog_touched = jiffies;
 }
 
 static void wq_watchdog_set_thresh(unsigned long thresh)