Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:
 "191 patches.

  Subsystems affected by this patch series: kthread, ia64, scripts,
  ntfs, squashfs, ocfs2, kernel/watchdog, and mm (gup, pagealloc, slab,
  slub, kmemleak, dax, debug, pagecache, gup, swap, memcg, pagemap,
  mprotect, bootmem, dma, tracing, vmalloc, kasan, initialization,
  pagealloc, and memory-failure)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (191 commits)
  mm,hwpoison: make get_hwpoison_page() call get_any_page()
  mm,hwpoison: send SIGBUS with error virutal address
  mm/page_alloc: split pcp->high across all online CPUs for cpuless nodes
  mm/page_alloc: allow high-order pages to be stored on the per-cpu lists
  mm: replace CONFIG_FLAT_NODE_MEM_MAP with CONFIG_FLATMEM
  mm: replace CONFIG_NEED_MULTIPLE_NODES with CONFIG_NUMA
  docs: remove description of DISCONTIGMEM
  arch, mm: remove stale mentions of DISCONIGMEM
  mm: remove CONFIG_DISCONTIGMEM
  m68k: remove support for DISCONTIGMEM
  arc: remove support for DISCONTIGMEM
  arc: update comment about HIGHMEM implementation
  alpha: remove DISCONTIGMEM and NUMA
  mm/page_alloc: move free_the_page
  mm/page_alloc: fix counting of managed_pages
  mm/page_alloc: improve memmap_pages dbg msg
  mm: drop SECTION_SHIFT in code comments
  mm/page_alloc: introduce vm.percpu_pagelist_high_fraction
  mm/page_alloc: limit the number of pages on PCP lists when reclaim is active
  mm/page_alloc: scale the number of pages that are batch freed
  ...
@@ -577,6 +577,7 @@ out_unlock:
 	rcu_read_unlock();
 	return css;
 }
+EXPORT_SYMBOL_GPL(cgroup_get_e_css);
 
 static void cgroup_get_live(struct cgroup *cgrp)
 {

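The hunk above exports cgroup_get_e_css() so that modules can use it. As a rough sketch of the calling pattern this enables (the helper name peek_effective_css is hypothetical; the only assumption is the usual css reference rule, namely that the returned css is pinned and must be released with css_put()):

#include <linux/cgroup.h>

/* Hypothetical illustration: look up the effective css of @ss for @cgrp
 * and drop the reference again.  cgroup_get_e_css() pins the css it
 * returns, so every successful call must be balanced by css_put(). */
static void peek_effective_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	css = cgroup_get_e_css(cgrp, ss);
	css_put(css);
}
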
@@ -455,7 +455,7 @@ static int __init crash_save_vmcoreinfo_init(void)
 	VMCOREINFO_SYMBOL(_stext);
 	VMCOREINFO_SYMBOL(vmap_area_list);
 
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
 	VMCOREINFO_SYMBOL(mem_map);
 	VMCOREINFO_SYMBOL(contig_page_data);
 #endif
@@ -484,7 +484,7 @@ static int __init crash_save_vmcoreinfo_init(void)
 	VMCOREINFO_OFFSET(page, compound_head);
 	VMCOREINFO_OFFSET(pglist_data, node_zones);
 	VMCOREINFO_OFFSET(pglist_data, nr_zones);
-#ifdef CONFIG_FLAT_NODE_MEM_MAP
+#ifdef CONFIG_FLATMEM
 	VMCOREINFO_OFFSET(pglist_data, node_mem_map);
 #endif
 	VMCOREINFO_OFFSET(pglist_data, node_start_pfn);

@@ -8309,8 +8309,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
 
 	if (vma->vm_flags & VM_DENYWRITE)
 		flags |= MAP_DENYWRITE;
-	if (vma->vm_flags & VM_MAYEXEC)
-		flags |= MAP_EXECUTABLE;
 	if (vma->vm_flags & VM_LOCKED)
 		flags |= MAP_LOCKED;
 	if (is_vm_hugetlb_page(vma))

@@ -2047,8 +2047,8 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
 	struct vm_area_struct *vma;
 
 	mmap_read_lock(mm);
-	vma = find_vma(mm, bp_vaddr);
-	if (vma && vma->vm_start <= bp_vaddr) {
+	vma = vma_lookup(mm, bp_vaddr);
+	if (vma) {
 		if (valid_vma(vma, false)) {
 			struct inode *inode = file_inode(vma->vm_file);
 			loff_t offset = vaddr_to_offset(vma, bp_vaddr);

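The uprobes hunk above switches from the find_vma() idiom to the vma_lookup() helper introduced elsewhere in this series. A minimal sketch of the difference, using hypothetical helper names (addr_vma_old/addr_vma_new) and assuming the caller already holds mmap_lock for reading:

#include <linux/mm.h>

/* Hypothetical helper: the pre-vma_lookup() idiom. */
static struct vm_area_struct *addr_vma_old(struct mm_struct *mm,
					   unsigned long addr)
{
	struct vm_area_struct *vma = find_vma(mm, addr);

	/* find_vma() returns the first VMA that ends above addr, which may
	 * still start above addr, so filter that case out by hand. */
	if (vma && vma->vm_start > addr)
		return NULL;
	return vma;
}

/* Hypothetical helper: the same lookup with the new API.  vma_lookup()
 * only returns a VMA that actually contains addr. */
static struct vm_area_struct *addr_vma_new(struct mm_struct *mm,
					   unsigned long addr)
{
	return vma_lookup(mm, addr);
}
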
@@ -1035,7 +1035,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	mm_pgtables_bytes_init(mm);
 	mm->map_count = 0;
 	mm->locked_vm = 0;
-	atomic_set(&mm->has_pinned, 0);
 	atomic64_set(&mm->pinned_vm, 0);
 	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
 	spin_lock_init(&mm->page_table_lock);

@@ -1162,14 +1162,14 @@ static bool __kthread_cancel_work(struct kthread_work *work)
  * modify @dwork's timer so that it expires after @delay. If @delay is zero,
  * @work is guaranteed to be queued immediately.
  *
- * Return: %true if @dwork was pending and its timer was modified,
- * %false otherwise.
+ * Return: %false if @dwork was idle and queued, %true otherwise.
  *
  * A special case is when the work is being canceled in parallel.
  * It might be caused either by the real kthread_cancel_delayed_work_sync()
  * or yet another kthread_mod_delayed_work() call. We let the other command
- * win and return %false here. The caller is supposed to synchronize these
- * operations a reasonable way.
+ * win and return %true here. The return value can be used for reference
+ * counting and the number of queued works stays the same. Anyway, the caller
+ * is supposed to synchronize these operations a reasonable way.
  *
  * This function is safe to call from any context including IRQ handler.
  * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
@@ -1181,13 +1181,15 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 {
 	struct kthread_work *work = &dwork->work;
 	unsigned long flags;
-	int ret = false;
+	int ret;
 
 	raw_spin_lock_irqsave(&worker->lock, flags);
 
 	/* Do not bother with canceling when never queued. */
-	if (!work->worker)
+	if (!work->worker) {
+		ret = false;
 		goto fast_queue;
+	}
 
 	/* Work must not be used with >1 worker, see kthread_queue_work() */
 	WARN_ON_ONCE(work->worker != worker);
@@ -1205,8 +1207,11 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 	 * be used for reference counting.
 	 */
 	kthread_cancel_delayed_work_timer(work, &flags);
-	if (work->canceling)
+	if (work->canceling) {
+		/* The number of works in the queue does not change. */
+		ret = true;
 		goto out;
+	}
 	ret = __kthread_cancel_work(work);
 
 fast_queue:

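The doc-comment rewrite above redefines the return value of kthread_mod_delayed_work(): %false means the work was idle and has just been queued, %true means it was already pending (or being canceled), so the number of queued works is unchanged. A minimal sketch of the reference-counting pattern this enables; the structure (struct my_dev) and helper (my_dev_rearm) are hypothetical:

#include <linux/kthread.h>
#include <linux/refcount.h>

/* Hypothetical device structure, used only for illustration. */
struct my_dev {
	refcount_t			refs;
	struct kthread_worker		*worker;
	struct kthread_delayed_work	dwork;
};

/* Re-arm the delayed work and keep the refcount in sync with the number
 * of queued works: take a reference only when the work goes from idle to
 * queued (return value false); otherwise the reference held for the
 * already-pending work still stands. */
static void my_dev_rearm(struct my_dev *dev, unsigned long delay)
{
	if (!kthread_mod_delayed_work(dev->worker, &dev->dwork, delay))
		refcount_inc(&dev->refs);
}
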
@@ -2921,11 +2921,11 @@ static struct ctl_table vm_table[] = {
 		.extra2 = &one_thousand,
 	},
 	{
-		.procname = "percpu_pagelist_fraction",
-		.data = &percpu_pagelist_fraction,
-		.maxlen = sizeof(percpu_pagelist_fraction),
+		.procname = "percpu_pagelist_high_fraction",
+		.data = &percpu_pagelist_high_fraction,
+		.maxlen = sizeof(percpu_pagelist_high_fraction),
 		.mode = 0644,
-		.proc_handler = percpu_pagelist_fraction_sysctl_handler,
+		.proc_handler = percpu_pagelist_high_fraction_sysctl_handler,
 		.extra1 = SYSCTL_ZERO,
 	},
 	{

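The sysctl table change above replaces percpu_pagelist_fraction with the new vm.percpu_pagelist_high_fraction knob. A minimal userspace sketch of tuning it through procfs; the value 8 is only an illustration, and 0 (the default) leaves the kernel's own heuristic in place:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/percpu_pagelist_high_fraction", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* 0 (the default) keeps the kernel heuristic; a non-zero fraction
	 * caps how many pages each zone may keep on its per-cpu lists. */
	fprintf(f, "%d\n", 8);
	return fclose(f) ? 1 : 0;
}
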
@@ -92,7 +92,7 @@ __setup("nmi_watchdog=", hardlockup_panic_setup);
  * own hardlockup detector.
  *
  * watchdog_nmi_enable/disable can be implemented to start and stop when
- * softlockup watchdog threads start and stop. The arch must select the
+ * softlockup watchdog start and stop. The arch must select the
  * SOFTLOCKUP_DETECTOR Kconfig.
  */
 int __weak watchdog_nmi_enable(unsigned int cpu)
@@ -335,7 +335,7 @@ static DEFINE_PER_CPU(struct completion, softlockup_completion);
 static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);
 
 /*
- * The watchdog thread function - touches the timestamp.
+ * The watchdog feed function - touches the timestamp.
  *
  * It only runs once every sample_period seconds (4 seconds by
  * default) to reset the softlockup timestamp. If this gets delayed
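The reworded comment above describes the softlockup scheme: a watchdog feed periodically touches a timestamp, and a separate check path treats a stale timestamp as evidence that the watched context could not run. A minimal, self-contained sketch of that feed/check idea (not the kernel's implementation; the names and the time source are illustrative):

#include <stdbool.h>
#include <time.h>

/* Timestamp of the last feed; in the kernel this is kept per CPU. */
static volatile time_t watchdog_touch_ts;

/* Feed side: runs from the watched context and resets the timestamp. */
static void watchdog_feed(void)
{
	watchdog_touch_ts = time(NULL);
}

/* Check side: a stale timestamp means the watched context has not been
 * able to run for longer than the threshold, i.e. a suspected lockup. */
static bool watchdog_check(unsigned int threshold_secs)
{
	return (time(NULL) - watchdog_touch_ts) > threshold_secs;
}
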
@@ -558,11 +558,7 @@ static void lockup_detector_reconfigure(void)
 }
 
 /*
- * Create the watchdog thread infrastructure and configure the detector(s).
- *
- * The threads are not unparked as watchdog_allowed_mask is empty. When
- * the threads are successfully initialized, take the proper locks and
- * unpark the threads in the watchdog_cpumask if the watchdog is enabled.
+ * Create the watchdog infrastructure and configure the detector(s).
  */
 static __init void lockup_detector_setup(void)
 {
@@ -628,7 +624,7 @@ void lockup_detector_soft_poweroff(void)
 
 #ifdef CONFIG_SYSCTL
 
-/* Propagate any changes to the watchdog threads */
+/* Propagate any changes to the watchdog infrastructure */
 static void proc_watchdog_update(void)
 {
 	/* Remove impossible cpus to keep sysctl output clean. */