Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "The gcc-4.4.4 workaround has actually been merged into a KVM tree by
  Paolo but it is stuck in linux-next and mainline needs it"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  arch/x86/kvm/mmu.c: work around gcc-4.4.4 bug
  sched, numa: do not hint for NUMA balancing on VM_MIXEDMAP mappings
  zsmalloc: fix a null pointer dereference in destroy_handle_cache()
  mm: memcontrol: fix false-positive VM_BUG_ON() on -rt
  checkpatch: fix "GLOBAL_INITIALISERS" test
  zram: clear disk io accounting when reset zram device
  memcg: do not call reclaim if !__GFP_WAIT
  mm/memory_hotplug.c: set zone->wait_table to null after freeing it
Linus Torvalds 2015-06-10 16:43:53 -07:00
commit f5278565be
7 changed files with 20 additions and 15 deletions

arch/x86/kvm/mmu.c

@@ -4215,13 +4215,13 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	u64 entry, gentry, *spte;
 	int npte;
 	bool remote_flush, local_flush, zap_page;
-	union kvm_mmu_page_role mask = (union kvm_mmu_page_role) {
-		.cr0_wp = 1,
-		.cr4_pae = 1,
-		.nxe = 1,
-		.smep_andnot_wp = 1,
-		.smap_andnot_wp = 1,
-	};
+	union kvm_mmu_page_role mask = { };
+
+	mask.cr0_wp = 1;
+	mask.cr4_pae = 1;
+	mask.nxe = 1;
+	mask.smep_andnot_wp = 1;
+	mask.smap_andnot_wp = 1;
 
 	/*
 	 * If we don't have indirect shadow pages, it means no page is
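The gcc-4.4.4 bug referenced above is triggered by designated initializers that name members of an anonymous struct inside a union, which is the shape of kvm_mmu_page_role. A reduced sketch (the type union flags and helper make_flags are invented for illustration, not from the commit) of the construct and the zero-initialize-then-assign workaround the patch uses:

/* Hypothetical reduced example, not from the commit. */
union flags {
	unsigned word;
	struct {		/* anonymous struct with bit-fields, like kvm_mmu_page_role */
		unsigned a : 1;
		unsigned b : 1;
	};
};

static union flags make_flags(void)
{
	/*
	 * gcc 4.4.4 mishandles the compound-literal form
	 *	union flags f = (union flags) { .a = 1, .b = 1 };
	 * because .a and .b live in the anonymous struct.
	 */
	union flags f = { };	/* zero the whole union first ... */

	f.a = 1;		/* ... then assign the fields one by one */
	f.b = 1;
	return f;
}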

drivers/block/zram/zram_drv.c

@@ -805,7 +805,9 @@ static void zram_reset_device(struct zram *zram)
 	memset(&zram->stats, 0, sizeof(zram->stats));
 	zram->disksize = 0;
 	zram->max_comp_streams = 1;
+
 	set_capacity(zram->disk, 0);
+	part_stat_set_all(&zram->disk->part0, 0);
 
 	up_write(&zram->init_lock);
 	/* I/O operation under all of CPU are done so let's free */

kernel/sched/fair.c

@@ -2181,7 +2181,7 @@ void task_numa_work(struct callback_head *work)
 	}
 	for (; vma; vma = vma->vm_next) {
 		if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
-			is_vm_hugetlb_page(vma)) {
+			is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
 			continue;
 		}
 

mm/memcontrol.c

@@ -2323,6 +2323,8 @@ done_restock:
 	css_get_many(&memcg->css, batch);
 	if (batch > nr_pages)
 		refill_stock(memcg, batch - nr_pages);
+	if (!(gfp_mask & __GFP_WAIT))
+		goto done;
 	/*
 	 * If the hierarchy is above the normal consumption range,
 	 * make the charging task trim their excess contribution.
@@ -5833,9 +5835,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 	if (!mem_cgroup_is_root(memcg))
 		page_counter_uncharge(&memcg->memory, 1);
 
-	/* XXX: caller holds IRQ-safe mapping->tree_lock */
-	VM_BUG_ON(!irqs_disabled());
-
+	/* Caller disabled preemption with mapping->tree_lock */
 	mem_cgroup_charge_statistics(memcg, page, -1);
 	memcg_check_events(memcg, page);
 }

mm/memory_hotplug.c

@@ -1969,8 +1969,10 @@ void try_offline_node(int nid)
 		 * wait_table may be allocated from boot memory,
 		 * here only free if it's allocated by vmalloc.
 		 */
-		if (is_vmalloc_addr(zone->wait_table))
+		if (is_vmalloc_addr(zone->wait_table)) {
 			vfree(zone->wait_table);
+			zone->wait_table = NULL;
+		}
 	}
 }
 EXPORT_SYMBOL(try_offline_node);

mm/zsmalloc.c

@@ -289,7 +289,8 @@ static int create_handle_cache(struct zs_pool *pool)
 
 static void destroy_handle_cache(struct zs_pool *pool)
 {
-	kmem_cache_destroy(pool->handle_cachep);
+	if (pool->handle_cachep)
+		kmem_cache_destroy(pool->handle_cachep);
 }
 
 static unsigned long alloc_handle(struct zs_pool *pool)

scripts/checkpatch.pl

@@ -3169,12 +3169,12 @@ sub process {
 		}
 
 # check for global initialisers.
-		if ($line =~ /^\+(\s*$Type\s*$Ident\s*(?:\s+$Modifier))*\s*=\s*(0|NULL|false)\s*;/) {
+		if ($line =~ /^\+$Type\s*$Ident(?:\s+$Modifier)*\s*=\s*(?:0|NULL|false)\s*;/) {
 			if (ERROR("GLOBAL_INITIALISERS",
 				  "do not initialise globals to 0 or NULL\n" .
 				      $herecurr) &&
 			    $fix) {
-				$fixed[$fixlinenr] =~ s/($Type\s*$Ident\s*(?:\s+$Modifier))*\s*=\s*(0|NULL|false)\s*;/$1;/;
+				$fixed[$fixlinenr] =~ s/(^.$Type\s*$Ident(?:\s+$Modifier)*)\s*=\s*(0|NULL|false)\s*;/$1;/;
 			}
 		}
# check for static initialisers.
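For context on the checkpatch fix: the GLOBAL_INITIALISERS test flags explicit zero initialisation of file-scope variables, since statics and globals land in .bss and are zeroed anyway. A minimal hypothetical C snippet (variable names invented, not from the patch) of what the corrected regex is meant to match:

/* Flagged: "ERROR: do not initialise globals to 0 or NULL" */
static int debug_level = 0;
static char *state_name = NULL;

/* Preferred: rely on implicit zero-initialisation of statics/globals */
static int log_level;
static char *mode_name;

With --fix, checkpatch rewrites the first form into the second, which is what the corrected $fixed substitution above performs.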