Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:
 "191 patches.

  Subsystems affected by this patch series: kthread, ia64, scripts,
  ntfs, squashfs, ocfs2, kernel/watchdog, and mm (gup, pagealloc, slab,
  slub, kmemleak, dax, debug, pagecache, gup, swap, memcg, pagemap,
  mprotect, bootmem, dma, tracing, vmalloc, kasan, initialization,
  pagealloc, and memory-failure)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (191 commits)
  mm,hwpoison: make get_hwpoison_page() call get_any_page()
  mm,hwpoison: send SIGBUS with error virutal address
  mm/page_alloc: split pcp->high across all online CPUs for cpuless nodes
  mm/page_alloc: allow high-order pages to be stored on the per-cpu lists
  mm: replace CONFIG_FLAT_NODE_MEM_MAP with CONFIG_FLATMEM
  mm: replace CONFIG_NEED_MULTIPLE_NODES with CONFIG_NUMA
  docs: remove description of DISCONTIGMEM
  arch, mm: remove stale mentions of DISCONIGMEM
  mm: remove CONFIG_DISCONTIGMEM
  m68k: remove support for DISCONTIGMEM
  arc: remove support for DISCONTIGMEM
  arc: update comment about HIGHMEM implementation
  alpha: remove DISCONTIGMEM and NUMA
  mm/page_alloc: move free_the_page
  mm/page_alloc: fix counting of managed_pages
  mm/page_alloc: improve memmap_pages dbg msg
  mm: drop SECTION_SHIFT in code comments
  mm/page_alloc: introduce vm.percpu_pagelist_high_fraction
  mm/page_alloc: limit the number of pages on PCP lists when reclaim is active
  mm/page_alloc: scale the number of pages that are batch freed
  ...
@@ -1162,14 +1162,14 @@ static bool __kthread_cancel_work(struct kthread_work *work)
  * modify @dwork's timer so that it expires after @delay. If @delay is zero,
  * @work is guaranteed to be queued immediately.
  *
- * Return: %true if @dwork was pending and its timer was modified,
- * %false otherwise.
+ * Return: %false if @dwork was idle and queued, %true otherwise.
  *
  * A special case is when the work is being canceled in parallel.
  * It might be caused either by the real kthread_cancel_delayed_work_sync()
  * or yet another kthread_mod_delayed_work() call. We let the other command
- * win and return %false here. The caller is supposed to synchronize these
- * operations a reasonable way.
+ * win and return %true here. The return value can be used for reference
+ * counting and the number of queued works stays the same. Anyway, the caller
+ * is supposed to synchronize these operations a reasonable way.
  *
  * This function is safe to call from any context including IRQ handler.
  * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
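The reworded kerneldoc above changes the return contract: %false now means @dwork was idle and a fresh work was queued, while %true means the number of queued works stayed the same (the pending timer was re-armed, or a parallel cancel won). For illustration, a minimal caller sketch of the reference-counting pattern the comment alludes to; struct my_obj, my_obj_get() and my_obj_put() are hypothetical names, not part of the patch or the kthread API:

    struct my_obj {
        struct kthread_worker *worker;
        struct kthread_delayed_work dwork;
        /* refcount and payload elided */
    };

    static void my_rearm(struct my_obj *obj, unsigned long delay)
    {
        /* Pin the object for the work instance that may get queued. */
        my_obj_get(obj);

        /*
         * %true: the work was already pending (or is being canceled),
         * so the queued-work count did not grow; drop the extra
         * reference. %false: a fresh instance was queued and consumes
         * the reference taken above.
         */
        if (kthread_mod_delayed_work(obj->worker, &obj->dwork, delay))
            my_obj_put(obj);
    }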
@@ -1181,13 +1181,15 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 {
 	struct kthread_work *work = &dwork->work;
 	unsigned long flags;
-	int ret = false;
+	int ret;
 
 	raw_spin_lock_irqsave(&worker->lock, flags);
 
 	/* Do not bother with canceling when never queued. */
-	if (!work->worker)
+	if (!work->worker) {
+		ret = false;
 		goto fast_queue;
+	}
 
 	/* Work must not be used with >1 worker, see kthread_queue_work() */
 	WARN_ON_ONCE(work->worker != worker);
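With `int ret;` no longer pre-initialized, every exit path must now assign it explicitly; the never-queued case above sets %false before jumping to fast_queue. For context, a minimal setup sketch showing how such a delayed work reaches this function in the first place, using the kthread worker API ("my_worker" and my_work_fn are illustrative names; error handling trimmed):

    #include <linux/err.h>
    #include <linux/init.h>
    #include <linux/kthread.h>

    static struct kthread_worker *worker;
    static struct kthread_delayed_work dwork;

    static void my_work_fn(struct kthread_work *work)
    {
        /* Runs in the worker kthread's context. */
    }

    static int __init my_setup(void)
    {
        worker = kthread_create_worker(0, "my_worker");
        if (IS_ERR(worker))
            return PTR_ERR(worker);

        kthread_init_delayed_work(&dwork, my_work_fn);
        kthread_queue_delayed_work(worker, &dwork, HZ);
        return 0;
    }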
@@ -1205,8 +1207,11 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 	 * be used for reference counting.
 	 */
 	kthread_cancel_delayed_work_timer(work, &flags);
-	if (work->canceling)
+	if (work->canceling) {
+		/* The number of works in the queue does not change. */
+		ret = true;
 		goto out;
+	}
 	ret = __kthread_cancel_work(work);
 
 fast_queue:
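After the patch, every exit path assigns ret explicitly: %false when the work was never queued (the fast_queue path), %true when a parallel cancel is in flight (the call backs off without queuing anything), and otherwise the result of __kthread_cancel_work(), i.e. %true if a pending instance was removed before re-queuing and %false if the work was idle. A hedged usage snippet, reusing the worker/dwork names from the setup sketch above:

    /*
     * If another context runs kthread_cancel_delayed_work_sync(&dwork)
     * concurrently, the cancel wins: nothing is queued here and the
     * call returns %true, so the queued-work count stays stable.
     */
    if (!kthread_mod_delayed_work(worker, &dwork, 2 * HZ))
        pr_info("dwork was idle; a fresh instance is now queued\n");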