Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "11 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  kasan: do not sanitize kexec purgatory
  drivers/rapidio/devices/tsi721.c: make module parameter variable name unique
  mm/hugetlb.c: don't call region_abort if region_chg fails
  kasan: report only the first error by default
  hugetlbfs: initialize shared policy as part of inode allocation
  mm: fix section name for .data..ro_after_init
  mm, hugetlb: use pte_present() instead of pmd_present() in follow_huge_pmd()
  mm: workingset: fix premature shadow node shrinking with cgroups
  mm: rmap: fix huge file mmap accounting in the memcg stats
  mm: move mm_percpu_wq initialization earlier
  mm: migrate: fix remove_migration_pte() for ksm pages
commit 978e0f92cd
@@ -1725,6 +1725,12 @@
 			kernel and module base offset ASLR (Address Space
 			Layout Randomization).
 
+	kasan_multi_shot
+			[KNL] Enforce KASAN (Kernel Address Sanitizer) to print
+			report on every invalid memory access. Without this
+			parameter KASAN will print report only for the first
+			invalid access.
+
 	keepinitrd	[HW,ARM]
 
 	kernelcore=	[KNL,X86,IA-64,PPC]
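As a usage note (not part of the patch): this is a boot-time parameter, so it is normally appended to the kernel command line. The bootloader configuration below is only an illustrative assumption.

    # e.g. in /etc/default/grub (path and surrounding options are assumptions):
    GRUB_CMDLINE_LINUX="console=ttyS0 kasan_multi_shot"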
@@ -4,6 +4,5 @@
 #include <asm-generic/sections.h>
 
 extern char _eshared[], _ehead[];
-extern char __start_ro_after_init[], __end_ro_after_init[];
 
 #endif
@@ -63,11 +63,9 @@ SECTIONS
 
 	. = ALIGN(PAGE_SIZE);
 	__start_ro_after_init = .;
-	__start_data_ro_after_init = .;
 	.data..ro_after_init : {
 		*(.data..ro_after_init)
 	}
-	__end_data_ro_after_init = .;
 	EXCEPTION_TABLE(16)
 	. = ALIGN(PAGE_SIZE);
 	__end_ro_after_init = .;
@@ -8,6 +8,7 @@ PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
 LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
 targets += purgatory.ro
 
+KASAN_SANITIZE := n
 KCOV_INSTRUMENT := n
 
 # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
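An aside on the kbuild switch used above (my summary, not text from the patch): KASAN_SANITIZE := n in a directory Makefile drops the KASAN instrumentation flags for every object built in that directory, which matters here because the purgatory code runs in a bare environment after kexec where no KASAN shadow memory exists. Kbuild also accepts a per-object form; the object name below is hypothetical.

    # Whole directory, as in this patch:
    KASAN_SANITIZE := n
    # Per-object variant (hypothetical file name):
    KASAN_SANITIZE_entry64.o := n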
@@ -37,8 +37,8 @@
 #include "tsi721.h"
 
 #ifdef DEBUG
-u32 dbg_level;
-module_param(dbg_level, uint, S_IWUSR | S_IRUGO);
+u32 tsi_dbg_level;
+module_param_named(dbg_level, tsi_dbg_level, uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
 #endif
 
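For context on the mechanism (my sketch, not part of the patch): module_param_named() decouples the user-visible parameter name from the C identifier, so the sysfs/boot-line name dbg_level is preserved while the global symbol gets a driver-specific prefix and no longer clashes with identically named variables elsewhere in a built-in kernel. A minimal illustrative module, with hypothetical names:

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/moduleparam.h>

    /* Uniquely named C symbol; users still see "dbg_level" under
     * /sys/module/<module>/parameters/ and on the module command line. */
    static u32 demo_dbg_level;
    module_param_named(dbg_level, demo_dbg_level, uint, S_IWUSR | S_IRUGO);
    MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");

    static int __init demo_init(void)
    {
    	pr_info("demo: dbg_level=%u\n", demo_dbg_level);
    	return 0;
    }
    module_init(demo_init);

    static void __exit demo_exit(void) { }
    module_exit(demo_exit);

    MODULE_LICENSE("GPL");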
@@ -40,11 +40,11 @@ enum {
 };
 
 #ifdef DEBUG
-extern u32 dbg_level;
+extern u32 tsi_dbg_level;
 
 #define tsi_debug(level, dev, fmt, arg...) \
 do { \
-	if (DBG_##level & dbg_level) \
+	if (DBG_##level & tsi_dbg_level) \
 		dev_dbg(dev, "%s: " fmt "\n", __func__, ##arg); \
 } while (0)
 #else
@@ -695,14 +695,11 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb,
 
 	inode = new_inode(sb);
 	if (inode) {
-		struct hugetlbfs_inode_info *info;
 		inode->i_ino = get_next_ino();
 		inode->i_mode = S_IFDIR | config->mode;
 		inode->i_uid = config->uid;
 		inode->i_gid = config->gid;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
-		info = HUGETLBFS_I(inode);
-		mpol_shared_policy_init(&info->policy, NULL);
 		inode->i_op = &hugetlbfs_dir_inode_operations;
 		inode->i_fop = &simple_dir_operations;
 		/* directory inodes start off with i_nlink == 2 (for "." entry) */
@@ -733,7 +730,6 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
 
 	inode = new_inode(sb);
 	if (inode) {
-		struct hugetlbfs_inode_info *info;
 		inode->i_ino = get_next_ino();
 		inode_init_owner(inode, dir, mode);
 		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
@@ -741,15 +737,6 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
 		inode->i_mapping->a_ops = &hugetlbfs_aops;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 		inode->i_mapping->private_data = resv_map;
-		info = HUGETLBFS_I(inode);
-		/*
-		 * The policy is initialized here even if we are creating a
-		 * private inode because initialization simply creates an
-		 * an empty rb tree and calls rwlock_init(), later when we
-		 * call mpol_free_shared_policy() it will just return because
-		 * the rb tree will still be empty.
-		 */
-		mpol_shared_policy_init(&info->policy, NULL);
 		switch (mode & S_IFMT) {
 		default:
 			init_special_inode(inode, mode, dev);
@@ -937,6 +924,18 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
 		hugetlbfs_inc_free_inodes(sbinfo);
 		return NULL;
 	}
+
+	/*
+	 * Any time after allocation, hugetlbfs_destroy_inode can be called
+	 * for the inode. mpol_free_shared_policy is unconditionally called
+	 * as part of hugetlbfs_destroy_inode. So, initialize policy here
+	 * in case of a quick call to destroy.
+	 *
+	 * Note that the policy is initialized even if we are creating a
+	 * private inode. This simplifies hugetlbfs_destroy_inode.
+	 */
+	mpol_shared_policy_init(&p->policy, NULL);
+
 	return &p->vfs_inode;
 }
 
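The comment added above captures the whole fix: hugetlbfs_destroy_inode() tears the shared policy down unconditionally, so the policy must be valid from the moment the inode is allocated, not only after the later setup in hugetlbfs_get_inode(). A user-space shaped sketch of that constructor/destructor ordering (all names here are illustrative assumptions, not kernel code):

    #include <stdlib.h>

    struct policy { int initialized; };

    static void policy_init(struct policy *p) { p->initialized = 1; }
    static void policy_free(struct policy *p) { p->initialized = 0; /* only safe after policy_init() */ }

    struct inode_like { struct policy policy; };

    static struct inode_like *alloc_inode_like(void)
    {
    	struct inode_like *i = malloc(sizeof(*i));
    	if (!i)
    		return NULL;
    	policy_init(&i->policy);	/* initialize at allocation, as in the patch */
    	return i;
    }

    static void destroy_inode_like(struct inode_like *i)
    {
    	policy_free(&i->policy);	/* called unconditionally, like hugetlbfs_destroy_inode */
    	free(i);
    }

    int main(void)
    {
    	struct inode_like *i = alloc_inode_like();
    	if (i)
    		destroy_inode_like(i);	/* safe even if nothing else was ever set up */
    	return 0;
    }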
@@ -14,8 +14,8 @@
  * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.*
  *   and/or .init.* sections.
  * [__start_rodata, __end_rodata]: contains .rodata.* sections
- * [__start_data_ro_after_init, __end_data_ro_after_init]:
- *   contains data.ro_after_init section
+ * [__start_ro_after_init, __end_ro_after_init]:
+ *   contains .data..ro_after_init section
  * [__init_begin, __init_end]: contains .init.* sections, but .init.text.*
  *   may be out of this range on some architectures.
  * [_sinittext, _einittext]: contains .init.text.* sections
@@ -33,7 +33,7 @@ extern char _data[], _sdata[], _edata[];
 extern char __bss_start[], __bss_stop[];
 extern char __init_begin[], __init_end[];
 extern char _sinittext[], _einittext[];
-extern char __start_data_ro_after_init[], __end_data_ro_after_init[];
+extern char __start_ro_after_init[], __end_ro_after_init[];
 extern char _end[];
 extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
 extern char __kprobes_text_start[], __kprobes_text_end[];
@@ -260,9 +260,9 @@
  */
 #ifndef RO_AFTER_INIT_DATA
 #define RO_AFTER_INIT_DATA \
-	__start_data_ro_after_init = .; \
+	__start_ro_after_init = .; \
 	*(.data..ro_after_init) \
-	__end_data_ro_after_init = .;
+	__end_ro_after_init = .;
 #endif
 
 /*
@@ -76,6 +76,9 @@ size_t ksize(const void *);
 static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
 size_t kasan_metadata_size(struct kmem_cache *cache);
 
+bool kasan_save_enable_multi_shot(void);
+void kasan_restore_multi_shot(bool enabled);
+
 #else /* CONFIG_KASAN */
 
 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
@@ -740,6 +740,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
 	return false;
 }
 
+static inline void mem_cgroup_update_page_stat(struct page *page,
+					       enum mem_cgroup_stat_index idx,
+					       int nr)
+{
+}
+
 static inline void mem_cgroup_inc_page_stat(struct page *page,
 					    enum mem_cgroup_stat_index idx)
 {
@@ -32,6 +32,8 @@ struct user_struct;
 struct writeback_control;
 struct bdi_writeback;
 
+void init_mm_internals(void);
+
 #ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */
 extern unsigned long max_mapnr;
 
@@ -1022,6 +1022,8 @@ static noinline void __init kernel_init_freeable(void)
 
 	workqueue_init();
 
+	init_mm_internals();
+
 	do_pre_smp_initcalls();
 	lockup_detector_init();
 
@@ -20,6 +20,7 @@
 #include <linux/string.h>
 #include <linux/uaccess.h>
 #include <linux/module.h>
+#include <linux/kasan.h>
 
 /*
  * Note: test functions are marked noinline so that their names appear in
@@ -474,6 +475,12 @@ static noinline void __init use_after_scope_test(void)
 
 static int __init kmalloc_tests_init(void)
 {
+	/*
+	 * Temporarily enable multi-shot mode. Otherwise, we'd only get a
+	 * report for the first case.
+	 */
+	bool multishot = kasan_save_enable_multi_shot();
+
 	kmalloc_oob_right();
 	kmalloc_oob_left();
 	kmalloc_node_oob_right();
@@ -499,6 +506,9 @@ static int __init kmalloc_tests_init(void)
 	ksize_unpoisons_memory();
 	copy_user_test();
 	use_after_scope_test();
+
+	kasan_restore_multi_shot(multishot);
+
 	return -EAGAIN;
 }
 
mm/hugetlb.c
@@ -4403,7 +4403,9 @@ int hugetlb_reserve_pages(struct inode *inode,
 	return 0;
 out_err:
 	if (!vma || vma->vm_flags & VM_MAYSHARE)
-		region_abort(resv_map, from, to);
+		/* Don't call region_abort if region_chg failed */
+		if (chg >= 0)
+			region_abort(resv_map, from, to);
 	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
 		kref_put(&resv_map->refs, resv_map_release);
 	return ret;
@@ -4651,6 +4653,7 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 {
 	struct page *page = NULL;
 	spinlock_t *ptl;
+	pte_t pte;
 retry:
 	ptl = pmd_lockptr(mm, pmd);
 	spin_lock(ptl);
@@ -4660,12 +4663,13 @@ retry:
 	 */
 	if (!pmd_huge(*pmd))
 		goto out;
-	if (pmd_present(*pmd)) {
+	pte = huge_ptep_get((pte_t *)pmd);
+	if (pte_present(pte)) {
 		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
 		if (flags & FOLL_GET)
 			get_page(page);
 	} else {
-		if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
+		if (is_hugetlb_entry_migration(pte)) {
 			spin_unlock(ptl);
 			__migration_entry_wait(mm, (pte_t *)pmd, ptl);
 			goto retry;
@@ -96,11 +96,6 @@ static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
 		<< KASAN_SHADOW_SCALE_SHIFT);
 }
 
-static inline bool kasan_report_enabled(void)
-{
-	return !current->kasan_depth;
-}
-
 void kasan_report(unsigned long addr, size_t size,
 		bool is_write, unsigned long ip);
 void kasan_report_double_free(struct kmem_cache *cache, void *object,
@@ -13,7 +13,9 @@
  *
  */
 
+#include <linux/bitops.h>
 #include <linux/ftrace.h>
+#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/printk.h>
@@ -293,6 +295,40 @@ static void kasan_report_error(struct kasan_access_info *info)
 	kasan_end_report(&flags);
 }
 
+static unsigned long kasan_flags;
+
+#define KASAN_BIT_REPORTED	0
+#define KASAN_BIT_MULTI_SHOT	1
+
+bool kasan_save_enable_multi_shot(void)
+{
+	return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
+}
+EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot);
+
+void kasan_restore_multi_shot(bool enabled)
+{
+	if (!enabled)
+		clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
+}
+EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
+
+static int __init kasan_set_multi_shot(char *str)
+{
+	set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
+	return 1;
+}
+__setup("kasan_multi_shot", kasan_set_multi_shot);
+
+static inline bool kasan_report_enabled(void)
+{
+	if (current->kasan_depth)
+		return false;
+	if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
+		return true;
+	return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
+}
+
 void kasan_report(unsigned long addr, size_t size,
 		bool is_write, unsigned long ip)
 {
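The helpers added above are exactly what the lib/test_kasan.c hunk earlier consumes: save the current mode, force multi-shot while deliberately triggering many bad accesses, then restore. A hedged sketch of a test-style caller (the module name and the trigger are assumptions, modeled on the kmalloc out-of-bounds tests referenced in the diff):

    #include <linux/kasan.h>
    #include <linux/module.h>
    #include <linux/slab.h>

    static noinline void trigger_oob(void)
    {
    	char *p = kmalloc(17, GFP_KERNEL);

    	if (!p)
    		return;
    	((volatile char *)p)[17] = 'x';	/* deliberate out-of-bounds write */
    	kfree(p);
    }

    static int __init multi_shot_demo_init(void)
    {
    	/* Report every bad access while this runs, then put the boot-time
    	 * kasan_multi_shot setting back the way it was. */
    	bool multishot = kasan_save_enable_multi_shot();

    	trigger_oob();
    	trigger_oob();	/* without multi-shot, only the first would be reported */

    	kasan_restore_multi_shot(multishot);
    	return 0;
    }
    module_init(multi_shot_demo_init);

    MODULE_LICENSE("GPL");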
@@ -1416,7 +1416,7 @@ static void kmemleak_scan(void)
 	/* data/bss scanning */
 	scan_large_block(_sdata, _edata);
 	scan_large_block(__bss_start, __bss_stop);
-	scan_large_block(__start_data_ro_after_init, __end_data_ro_after_init);
+	scan_large_block(__start_ro_after_init, __end_ro_after_init);
 
 #ifdef CONFIG_SMP
 	/* per-cpu sections scanning */
@@ -209,8 +209,11 @@ static int remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 
 	VM_BUG_ON_PAGE(PageTail(page), page);
 	while (page_vma_mapped_walk(&pvmw)) {
-		new = page - pvmw.page->index +
-			linear_page_index(vma, pvmw.address);
+		if (PageKsm(page))
+			new = page;
+		else
+			new = page - pvmw.page->index +
+				linear_page_index(vma, pvmw.address);
 
 		get_page(new);
 		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
@@ -1159,7 +1159,7 @@ void page_add_file_rmap(struct page *page, bool compound)
 		goto out;
 	}
 	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
-	mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+	mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, nr);
 out:
 	unlock_page_memcg(page);
 }
@@ -1199,7 +1199,7 @@ static void page_remove_file_rmap(struct page *page, bool compound)
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
 	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
-	mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+	mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, -nr);
 
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);
@@ -1764,7 +1764,7 @@ static int vmstat_cpu_dead(unsigned int cpu)
 
 #endif
 
-static int __init setup_vmstat(void)
+void __init init_mm_internals(void)
 {
 #ifdef CONFIG_SMP
 	int ret;
@@ -1792,9 +1792,7 @@
 	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
 	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
 #endif
-	return 0;
 }
-module_init(setup_vmstat)
 
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
 
@@ -532,7 +532,7 @@ static int __init workingset_init(void)
 	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
 		timestamp_bits, max_order, bucket_order);
 
-	ret = list_lru_init_key(&shadow_nodes, &shadow_nodes_key);
+	ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key);
 	if (ret)
 		goto err;
 	ret = register_shrinker(&workingset_shadow_shrinker);