commit 2c1aca4bd3
Merge branch 'for-5.6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull workqueue fixes from Tejun Heo:

 "Workqueue has been incorrectly round-robining per-cpu work items.
  Hillf's patch fixes that.

  The other patch documents memory-ordering properties of workqueue
  operations"

* 'for-5.6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: don't use wq_select_unbound_cpu() for bound works
  workqueue: Document (some) memory-ordering properties of {queue,schedule}_work()
include/linux/workqueue.h

@@ -487,6 +487,19 @@ extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
  *
  * We queue the work to the CPU on which it was submitted, but if the CPU dies
  * it can be processed by another CPU.
+ *
+ * Memory-ordering properties:  If it returns %true, guarantees that all stores
+ * preceding the call to queue_work() in the program order will be visible from
+ * the CPU which will execute @work by the time such work executes, e.g.,
+ *
+ * { x is initially 0 }
+ *
+ *   CPU0				CPU1
+ *
+ *   WRITE_ONCE(x, 1);			[ @work is being executed ]
+ *   r0 = queue_work(wq, work);		  r1 = READ_ONCE(x);
+ *
+ * Forbids: r0 == true && r1 == 0
  */
 static inline bool queue_work(struct workqueue_struct *wq,
 			      struct work_struct *work)
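The added comment is effectively a litmus test: a queue_work() that returns
%true acts like a release for the submitter's prior stores, with the start of
the handler as the matching acquire. A minimal sketch of the pattern this
permits; the struct wq_example wrapper, ex_handler() and ex_submit() are
illustrative names, not part of the patch:

#include <linux/printk.h>
#include <linux/workqueue.h>

/* Illustrative payload published from submitter to handler. */
struct wq_example {
	struct work_struct work;
	int payload;
};

static struct wq_example ex;

static void ex_handler(struct work_struct *work)
{
	struct wq_example *e = container_of(work, struct wq_example, work);

	/*
	 * Per the documentation above: if the earlier queue_work()
	 * returned %true, the submitter's store to e->payload is
	 * visible here with no explicit barrier.
	 */
	pr_info("payload = %d\n", e->payload);
}

static bool ex_submit(struct workqueue_struct *wq)
{
	INIT_WORK(&ex.work, ex_handler);
	ex.payload = 42;		 /* store preceding queue_work()... */
	return queue_work(wq, &ex.work); /* ...visible inside ex_handler() */
}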
include/linux/workqueue.h

@@ -546,6 +559,9 @@ static inline bool schedule_work_on(int cpu, struct work_struct *work)
  * This puts a job in the kernel-global workqueue if it was not already
  * queued and leaves it in the same position on the kernel-global
  * workqueue otherwise.
+ *
+ * Shares the same memory-ordering properties of queue_work(), cf. the
+ * DocBook header of queue_work().
  */
 static inline bool schedule_work(struct work_struct *work)
 {
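For reference, the body of schedule_work() (in this same header, not shown in
the hunk) simply forwards to the system workqueue, which is why the guarantee
carries over with nothing added:

static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}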
kernel/workqueue.c

@@ -1411,14 +1411,16 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 		return;
 	rcu_read_lock();
 retry:
-	if (req_cpu == WORK_CPU_UNBOUND)
-		cpu = wq_select_unbound_cpu(raw_smp_processor_id());
-
 	/* pwq which will be used unless @work is executing elsewhere */
-	if (!(wq->flags & WQ_UNBOUND))
-		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
-	else
+	if (wq->flags & WQ_UNBOUND) {
+		if (req_cpu == WORK_CPU_UNBOUND)
+			cpu = wq_select_unbound_cpu(raw_smp_processor_id());
 		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
+	} else {
+		if (req_cpu == WORK_CPU_UNBOUND)
+			cpu = raw_smp_processor_id();
+		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
+	}

 	/*
 	 * If @work was previously on a different pool, it might still be
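The bug this fixes: with req_cpu == WORK_CPU_UNBOUND, the old code pushed
every work item, including those headed for per-cpu (bound) workqueues,
through wq_select_unbound_cpu(), which round-robins across CPUs whenever the
local CPU is not in wq_unbound_cpumask. A bound work item could therefore run
on a CPU other than the one that queued it. The rewrite consults
wq_select_unbound_cpu() only for WQ_UNBOUND workqueues and keeps bound work on
the submitting CPU. A hedged sketch of the kind of caller that depends on
this; pending_val, consume_fn() and produce_local() are illustrative, and
bound_wq is assumed to have been created without WQ_UNBOUND:

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

static DEFINE_PER_CPU(int, pending_val);

static void consume_fn(struct work_struct *work)
{
	/*
	 * Reads the slot of the CPU this handler runs on.  Correct only
	 * because a bound work item executes on the CPU that queued it;
	 * the old round-robin could run it elsewhere and read another
	 * CPU's slot.
	 */
	pr_info("consumed %d\n", this_cpu_read(pending_val));
}

/* @w is assumed to have been set up with INIT_WORK(w, consume_fn). */
static void produce_local(struct workqueue_struct *bound_wq,
			  struct work_struct *w)
{
	preempt_disable();
	this_cpu_write(pending_val, 42);
	queue_work(bound_wq, w);	/* stays on this CPU's pwq */
	preempt_enable();
}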