Merge branch 'akpm' (patches from Andrew)
Merge third patch-bomb from Andrew Morton:

 - even more of the rest of MM
 - lib/ updates
 - checkpatch updates
 - small changes to a few scruffy filesystems
 - kmod fixes/cleanups
 - kexec updates
 - a dma-mapping cleanup series from hch

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (81 commits)
  dma-mapping: consolidate dma_set_mask
  dma-mapping: consolidate dma_supported
  dma-mapping: cosolidate dma_mapping_error
  dma-mapping: consolidate dma_{alloc,free}_noncoherent
  dma-mapping: consolidate dma_{alloc,free}_{attrs,coherent}
  mm: use vma_is_anonymous() in create_huge_pmd() and wp_huge_pmd()
  mm: make sure all file VMAs have ->vm_ops set
  mm, mpx: add "vm_flags_t vm_flags" arg to do_mmap_pgoff()
  mm: mark most vm_operations_struct const
  namei: fix warning while make xmldocs caused by namei.c
  ipc: convert invalid scenarios to use WARN_ON
  zlib_deflate/deftree: remove bi_reverse()
  lib/decompress_unlzma: Do a NULL check for pointer
  lib/decompressors: use real out buf size for gunzip with kernel
  fs/affs: make root lookup from blkdev logical size
  sysctl: fix int -> unsigned long assignments in INT_MIN case
  kexec: export KERNEL_IMAGE_SIZE to vmcoreinfo
  kexec: align crash_notes allocation to make it be inside one physical page
  kexec: remove unnecessary test in kimage_alloc_crash_control_pages()
  kexec: split kexec_load syscall from kexec core code
  ...
@@ -16,7 +16,7 @@

#include <uapi/linux/kexec.h>

#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE
#include <linux/list.h>
#include <linux/linkage.h>
#include <linux/compat.h>
@@ -318,13 +318,24 @@ int crash_shrink_memory(unsigned long new_size);
size_t crash_get_memory_size(void);
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);

#else /* !CONFIG_KEXEC */
int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
                                         unsigned long buf_len);
void * __weak arch_kexec_kernel_image_load(struct kimage *image);
int __weak arch_kimage_file_post_load_cleanup(struct kimage *image);
int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
                                        unsigned long buf_len);
int __weak arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr,
                                            Elf_Shdr *sechdrs, unsigned int relsec);
int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
                                        unsigned int relsec);

#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
struct task_struct;
static inline void crash_kexec(struct pt_regs *regs) { }
static inline int kexec_should_crash(struct task_struct *p) { return 0; }
#define kexec_in_progress false
#endif /* CONFIG_KEXEC */
#endif /* CONFIG_KEXEC_CORE */

#endif /* !defined(__ASSEBMLY__) */
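A minimal caller sketch (not part of this patch; the function name is illustrative) of why the !CONFIG_KEXEC_CORE stubs above exist: panic-path code can call these helpers unconditionally, and they compile away when the kexec core is not built in.

#include <linux/kexec.h>
#include <linux/sched.h>

static void demo_handle_fatal_error(struct pt_regs *regs)
{
        if (kexec_should_crash(current))        /* returns 0 without CONFIG_KEXEC_CORE */
                crash_kexec(regs);              /* empty stub without CONFIG_KEXEC_CORE */
}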
@@ -85,8 +85,6 @@ enum umh_disable_depth {
        UMH_DISABLED,
};

extern void usermodehelper_init(void);

extern int __usermodehelper_disable(enum umh_disable_depth depth);
extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth);
@@ -305,11 +305,9 @@ struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);

struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
        return css ? container_of(css, struct mem_cgroup, css) : NULL;
@@ -345,6 +343,7 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_disabled(void)
{
@@ -555,11 +554,6 @@ static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
        return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
        return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
                                   struct mem_cgroup *memcg)
{
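A hedged sketch of how the new page_cgroup_ino() declaration above can be used: it reports the memory cgroup a page is charged to as a cgroupfs inode number, which is a convenient way to expose ownership to reporting code. The helper below is illustrative, not taken from the patch.

#include <linux/memcontrol.h>
#include <linux/seq_file.h>

static void demo_show_page_owner(struct seq_file *m, struct page *page)
{
        /* inode number of the owning memcg's directory in cgroupfs */
        seq_printf(m, "memcg inode: %lu\n",
                   (unsigned long)page_cgroup_ino(page));
}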
@@ -1873,11 +1873,19 @@ extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned lo

extern unsigned long mmap_region(struct file *file, unsigned long addr,
        unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
extern unsigned long do_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot, unsigned long flags,
        unsigned long pgoff, unsigned long *populate);
        vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate);
extern int do_munmap(struct mm_struct *, unsigned long, size_t);

static inline unsigned long
do_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot, unsigned long flags,
        unsigned long pgoff, unsigned long *populate)
{
        return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate);
}

#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
                         int ignore_errors);
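A sketch of the interface change above (an assumed caller, not from the patch): do_mmap() now accepts vm_flags directly, so a caller that must force extra VM flags (the MPX case named in the commit list) no longer needs a special path, while do_mmap_pgoff() stays a thin wrapper passing vm_flags = 0. The caller is assumed to hold mmap_sem for write, and the chosen extra flag is illustrative.

#include <linux/mm.h>
#include <linux/mman.h>

static unsigned long demo_map_with_extra_flags(struct file *file,
                                               unsigned long len,
                                               unsigned long *populate)
{
        /* VM_DONTCOPY stands in for whatever extra vm_flags a caller forces */
        return do_mmap(file, 0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE,
                       VM_DONTCOPY, 0, populate);
}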
@@ -65,6 +65,16 @@ struct mmu_notifier_ops {
                           unsigned long start,
                           unsigned long end);

        /*
         * clear_young is a lightweight version of clear_flush_young. Like the
         * latter, it is supposed to test-and-clear the young/accessed bitflag
         * in the secondary pte, but it may omit flushing the secondary tlb.
         */
        int (*clear_young)(struct mmu_notifier *mn,
                           struct mm_struct *mm,
                           unsigned long start,
                           unsigned long end);

        /*
         * test_young is called to check the young/accessed bitflag in
         * the secondary pte. This is used to know if the page is
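A hedged sketch of how a secondary-MMU driver might hook the new clear_young callback declared above: it ages its own mappings without a TLB flush and reports whether any were young. Everything below other than the mmu_notifier types is illustrative.

#include <linux/mmu_notifier.h>

static int demo_mn_clear_young(struct mmu_notifier *mn, struct mm_struct *mm,
                               unsigned long start, unsigned long end)
{
        int young = 0;

        /*
         * Test-and-clear the accessed bits in the driver's shadow page
         * tables for [start, end); no secondary TLB flush is required.
         */
        /* young = demo_shadow_age_range(mn, start, end); */
        return young;
}

static const struct mmu_notifier_ops demo_mn_ops = {
        .clear_young = demo_mn_clear_young,
};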
@@ -203,6 +213,9 @@ extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
                                            unsigned long start,
                                            unsigned long end);
extern int __mmu_notifier_clear_young(struct mm_struct *mm,
                                      unsigned long start,
                                      unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
                                     unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
@@ -231,6 +244,15 @@ static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
        return 0;
}

static inline int mmu_notifier_clear_young(struct mm_struct *mm,
                                           unsigned long start,
                                           unsigned long end)
{
        if (mm_has_notifiers(mm))
                return __mmu_notifier_clear_young(mm, start, end);
        return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
                                          unsigned long address)
{
@@ -311,6 +333,28 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
        __young;                                                        \
})

#define ptep_clear_young_notify(__vma, __address, __ptep)               \
({                                                                      \
        int __young;                                                    \
        struct vm_area_struct *___vma = __vma;                          \
        unsigned long ___address = __address;                           \
        __young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
        __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,  \
                                            ___address + PAGE_SIZE);    \
        __young;                                                        \
})

#define pmdp_clear_young_notify(__vma, __address, __pmdp)               \
({                                                                      \
        int __young;                                                    \
        struct vm_area_struct *___vma = __vma;                          \
        unsigned long ___address = __address;                           \
        __young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
        __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,  \
                                            ___address + PMD_SIZE);     \
        __young;                                                        \
})

#define ptep_clear_flush_notify(__vma, __address, __ptep)               \
({                                                                      \
        unsigned long ___addr = __address & PAGE_MASK;                  \
@@ -427,6 +471,8 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
@@ -108,6 +108,10 @@ enum pageflags {
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        PG_compound_lock,
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
        PG_young,
        PG_idle,
#endif
        __NR_PAGEFLAGS,
@@ -289,6 +293,13 @@ PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young)
SETPAGEFLAG(Young, young)
TESTCLEARFLAG(Young, young)
PAGEFLAG(Idle, idle)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
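For orientation, the page-flag macros used above generate the accessors that the new page_idle helpers later in this diff rely on. They roughly expand to the following (a sketch of the expansion, not the exact kernel text):

static inline int PageYoung(const struct page *page)          /* TESTPAGEFLAG(Young, young) */
{
        return test_bit(PG_young, &page->flags);
}

static inline void SetPageYoung(struct page *page)            /* SETPAGEFLAG(Young, young) */
{
        set_bit(PG_young, &page->flags);
}

static inline int TestClearPageYoung(struct page *page)       /* TESTCLEARFLAG(Young, young) */
{
        return test_and_clear_bit(PG_young, &page->flags);
}

/* PAGEFLAG(Idle, idle) likewise generates PageIdle(), SetPageIdle() and ClearPageIdle(). */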
@@ -26,6 +26,10 @@ enum page_ext_flags {
        PAGE_EXT_DEBUG_POISON,          /* Page is poisoned */
        PAGE_EXT_DEBUG_GUARD,
        PAGE_EXT_OWNER,
#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
        PAGE_EXT_YOUNG,
        PAGE_EXT_IDLE,
#endif
};

/*
include/linux/page_idle.h (new file, 110 lines)
@@ -0,0 +1,110 @@
#ifndef _LINUX_MM_PAGE_IDLE_H
#define _LINUX_MM_PAGE_IDLE_H

#include <linux/bitops.h>
#include <linux/page-flags.h>
#include <linux/page_ext.h>

#ifdef CONFIG_IDLE_PAGE_TRACKING

#ifdef CONFIG_64BIT
static inline bool page_is_young(struct page *page)
{
        return PageYoung(page);
}

static inline void set_page_young(struct page *page)
{
        SetPageYoung(page);
}

static inline bool test_and_clear_page_young(struct page *page)
{
        return TestClearPageYoung(page);
}

static inline bool page_is_idle(struct page *page)
{
        return PageIdle(page);
}

static inline void set_page_idle(struct page *page)
{
        SetPageIdle(page);
}

static inline void clear_page_idle(struct page *page)
{
        ClearPageIdle(page);
}
#else /* !CONFIG_64BIT */
/*
 * If there is not enough space to store Idle and Young bits in page flags, use
 * page ext flags instead.
 */
extern struct page_ext_operations page_idle_ops;

static inline bool page_is_young(struct page *page)
{
        return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
}

static inline void set_page_young(struct page *page)
{
        set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
}

static inline bool test_and_clear_page_young(struct page *page)
{
        return test_and_clear_bit(PAGE_EXT_YOUNG,
                                  &lookup_page_ext(page)->flags);
}

static inline bool page_is_idle(struct page *page)
{
        return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
}

static inline void set_page_idle(struct page *page)
{
        set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
}

static inline void clear_page_idle(struct page *page)
{
        clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
}
#endif /* CONFIG_64BIT */

#else /* !CONFIG_IDLE_PAGE_TRACKING */

static inline bool page_is_young(struct page *page)
{
        return false;
}

static inline void set_page_young(struct page *page)
{
}

static inline bool test_and_clear_page_young(struct page *page)
{
        return false;
}

static inline bool page_is_idle(struct page *page)
{
        return false;
}

static inline void set_page_idle(struct page *page)
{
}

static inline void clear_page_idle(struct page *page)
{
}

#endif /* CONFIG_IDLE_PAGE_TRACKING */

#endif /* _LINUX_MM_PAGE_IDLE_H */
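A sketch of how the new header is meant to be used together with the notifier-aware pte helpers earlier in this diff (the function names and the walk context are illustrative, not from the patch): an aging scanner folds accessed bits into PG_young, marks the page idle, and later treats a cleared idle bit or a folded young bit as a reference.

#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>

static void demo_mark_page_idle(struct vm_area_struct *vma, unsigned long addr,
                                pte_t *ptep, struct page *page)
{
        /* fold accessed bits from the primary pte and any secondary MMUs */
        if (ptep_clear_young_notify(vma, addr, ptep))
                set_page_young(page);

        set_page_idle(page);
}

static bool demo_page_was_referenced(struct page *page)
{
        /* referenced if something cleared PG_idle or set PG_young meanwhile */
        return !page_is_idle(page) || test_and_clear_page_young(page);
}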
@@ -19,8 +19,8 @@
 * under normal circumstances, used to verify that nobody uses
 * non-initialized list entries.
 */
#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA)

/********** include/linux/timer.h **********/
/*
@@ -69,10 +69,6 @@
#define ATM_POISON_FREE 0x12
#define ATM_POISON 0xdeadbeef

/********** net/ **********/
#define NEIGHBOR_DEAD 0xdeadbeef
#define NETFILTER_LINK_POISON 0xdead57ac

/********** kernel/mutexes **********/
#define MUTEX_DEBUG_INIT 0x11
#define MUTEX_DEBUG_FREE 0x22
@@ -83,7 +79,4 @@
/********** security/ **********/
#define KEY_DESTROY 0xbd

/********** sound/oss/ **********/
#define OSS_POISON_FREE 0xAB

#endif
@@ -404,10 +404,10 @@ do { \
        static DEFINE_RATELIMIT_STATE(_rs,                              \
                                      DEFAULT_RATELIMIT_INTERVAL,       \
                                      DEFAULT_RATELIMIT_BURST);         \
        DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);                 \
        DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, pr_fmt(fmt));         \
        if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) &&        \
            __ratelimit(&_rs))                                          \
                __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__);    \
                __dynamic_pr_debug(&descriptor, pr_fmt(fmt), ##__VA_ARGS__); \
} while (0)
#elif defined(DEBUG)
#define pr_debug_ratelimited(fmt, ...) \
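A hedged example of the effect of the pr_fmt() change above (the module prefix and message are made up): with CONFIG_DYNAMIC_DEBUG enabled, pr_debug_ratelimited() now applies the same pr_fmt() prefix as the other pr_*() macros instead of emitting the raw format string.

#define pr_fmt(fmt) "demo: " fmt

#include <linux/printk.h>

static void demo_warn_queue_full(int waiters)
{
        /* when this debug site is enabled, prints e.g. "demo: queue full, 3 waiting" */
        pr_debug_ratelimited("queue full, %d waiting\n", waiters);
}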
@@ -456,11 +456,17 @@ static inline void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
                     groupsize, buf, len, ascii)                \
        dynamic_hex_dump(prefix_str, prefix_type, rowsize,      \
                         groupsize, buf, len, ascii)
#else
#elif defined(DEBUG)
#define print_hex_dump_debug(prefix_str, prefix_type, rowsize,         \
                             groupsize, buf, len, ascii)               \
        print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize,   \
                       groupsize, buf, len, ascii)
#endif /* defined(CONFIG_DYNAMIC_DEBUG) */
#else
static inline void print_hex_dump_debug(const char *prefix_str, int prefix_type,
                                        int rowsize, int groupsize,
                                        const void *buf, size_t len, bool ascii)
{
}
#endif

#endif
@@ -122,6 +122,10 @@ int seq_write(struct seq_file *seq, const void *data, size_t len);
__printf(2, 3) int seq_printf(struct seq_file *, const char *, ...);
__printf(2, 0) int seq_vprintf(struct seq_file *, const char *, va_list args);

void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type,
                  int rowsize, int groupsize, const void *buf, size_t len,
                  bool ascii);

int seq_path(struct seq_file *, const struct path *, const char *);
int seq_file_path(struct seq_file *, struct file *, const char *);
int seq_dentry(struct seq_file *, struct dentry *, const char *);
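A minimal ->show() sketch using the seq_hex_dump() declaration added above (the buffer and names are illustrative); DUMP_PREFIX_OFFSET is the usual hex-dump prefix style from <linux/printk.h>.

#include <linux/printk.h>
#include <linux/seq_file.h>
#include <linux/types.h>

static int demo_show(struct seq_file *m, void *v)
{
        static const u8 buf[32] = { 0xde, 0xad, 0xbe, 0xef };

        /* 16 bytes per row, 1-byte groups, with an ASCII column */
        seq_hex_dump(m, "demo: ", DUMP_PREFIX_OFFSET, 16, 1,
                     buf, sizeof(buf), true);
        return 0;
}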
@@ -48,24 +48,24 @@ static inline int string_unescape_any_inplace(char *buf)
#define ESCAPE_HEX 0x20

int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
                      unsigned int flags, const char *esc);
                      unsigned int flags, const char *only);

static inline int string_escape_mem_any_np(const char *src, size_t isz,
                                           char *dst, size_t osz, const char *esc)
                                           char *dst, size_t osz, const char *only)
{
        return string_escape_mem(src, isz, dst, osz, ESCAPE_ANY_NP, esc);
        return string_escape_mem(src, isz, dst, osz, ESCAPE_ANY_NP, only);
}

static inline int string_escape_str(const char *src, char *dst, size_t sz,
                                    unsigned int flags, const char *esc)
                                    unsigned int flags, const char *only)
{
        return string_escape_mem(src, strlen(src), dst, sz, flags, esc);
        return string_escape_mem(src, strlen(src), dst, sz, flags, only);
}

static inline int string_escape_str_any_np(const char *src, char *dst,
                                           size_t sz, const char *esc)
                                           size_t sz, const char *only)
{
        return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, esc);
        return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, only);
}

#endif
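A hedged usage sketch of the renamed parameter: when "only" is non-NULL it restricts escaping to the characters it lists, which the old name "esc" obscured. The call below is illustrative, not from the patch.

#include <linux/string_helpers.h>

static int demo_escape_tabs(char *out, size_t outsz)
{
        /* escape tab characters only; the newline passes through untouched */
        return string_escape_str("a\tb\nc", out, outsz, ESCAPE_SPECIAL, "\t");
}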
@@ -36,6 +36,8 @@ enum zpool_mapmode {
        ZPOOL_MM_DEFAULT = ZPOOL_MM_RW
};

bool zpool_has_pool(char *type);

struct zpool *zpool_create_pool(char *type, char *name,
                                gfp_t gfp, const struct zpool_ops *ops);
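A sketch of the new zpool_has_pool() check: a zswap-style user can probe whether a compressed-memory backend is available before creating a pool. Names, the choice of backend, and the NULL ops argument are assumptions made for this sketch only.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/zpool.h>

static struct zpool *demo_pool;

static int demo_pool_init(void)
{
        static char type[] = "zbud";
        static char name[] = "demo";

        if (!zpool_has_pool(type))      /* backend compiled in / loadable? */
                return -ENODEV;

        demo_pool = zpool_create_pool(type, name, GFP_KERNEL, NULL);
        return demo_pool ? 0 : -ENOMEM;
}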