forked from Minki/linux
4ed2863951
Both load_elf_interp and load_elf_binary rely on elf_map to map segments on a controlled address and they use MAP_FIXED to enforce that. This is, however, a dangerous thing prone to silent data corruption which can be even exploitable. Let's take CVE-2017-1000253 as an example. At the time (before commit eab09532d4
: "binfmt_elf: use ELF_ET_DYN_BASE only for PIE") ELF_ET_DYN_BASE was at TASK_SIZE / 3 * 2 which is not that far away from the stack top on 32b (legacy) memory layout (only 1GB away). Therefore we could end up mapping over the existing stack with some luck. The issue has been fixed since then (a87938b2e2
: "fs/binfmt_elf.c: fix bug in loading of PIE binaries"), ELF_ET_DYN_BASE moved much further from the stack (eab09532d4
and later by c715b72c1b
: "mm: revert x86_64 and arm64 ELF_ET_DYN_BASE base changes") and excessive stack consumption early during execve fully stopped by da029c11e6
("exec: Limit arg stack to at most 75% of _STK_LIM"). So we should be safe and any attack should be impractical. On the other hand this is just too subtle assumption so it can break quite easily and hard to spot. I believe that the MAP_FIXED usage in load_elf_binary (et. al) is still fundamentally dangerous. Moreover it shouldn't be even needed. We are at the early process stage and so there shouldn't be unrelated mappings (except for stack and loader) existing so mmap for a given address should succeed even without MAP_FIXED. Something is terribly wrong if this is not the case and we should rather fail than silently corrupt the underlying mapping. Address this issue by changing MAP_FIXED to the newly added MAP_FIXED_NOREPLACE. This will mean that mmap will fail if there is an existing mapping clashing with the requested one without clobbering it. [mhocko@suse.com: fix build] [akpm@linux-foundation.org: coding-style fixes] [avagin@openvz.org: don't use the same value for MAP_FIXED_NOREPLACE and MAP_SYNC] Link: http://lkml.kernel.org/r/20171218184916.24445-1-avagin@openvz.org Link: http://lkml.kernel.org/r/20171213092550.2774-3-mhocko@kernel.org Signed-off-by: Michal Hocko <mhocko@suse.com> Signed-off-by: Andrei Vagin <avagin@openvz.org> Signed-off-by: Michal Hocko <mhocko@suse.com> Reviewed-by: Khalid Aziz <khalid.aziz@oracle.com> Acked-by: Michael Ellerman <mpe@ellerman.id.au> Acked-by: Kees Cook <keescook@chromium.org> Cc: Abdul Haleem <abdhalee@linux.vnet.ibm.com> Cc: Joel Stanley <joel@jms.id.au> Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
78 lines
3.1 KiB
C
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __ASM_GENERIC_MMAN_COMMON_H
#define __ASM_GENERIC_MMAN_COMMON_H

/*
 Author: Michael S. Tsirkin <mst@mellanox.co.il>, Mellanox Technologies Ltd.
 Based on: asm-xxx/mman.h
*/

#define PROT_READ	0x1		/* page can be read */
#define PROT_WRITE	0x2		/* page can be written */
#define PROT_EXEC	0x4		/* page can be executed */
#define PROT_SEM	0x8		/* page may be used for atomic ops */
#define PROT_NONE	0x0		/* page can not be accessed */
#define PROT_GROWSDOWN	0x01000000	/* mprotect flag: extend change to start of growsdown vma */
#define PROT_GROWSUP	0x02000000	/* mprotect flag: extend change to end of growsup vma */

#define MAP_SHARED	0x01		/* Share changes */
#define MAP_PRIVATE	0x02		/* Changes are private */
#define MAP_SHARED_VALIDATE 0x03	/* share + validate extension flags */
#define MAP_TYPE	0x0f		/* Mask for type of mapping */
#define MAP_FIXED	0x10		/* Interpret addr exactly */
#define MAP_ANONYMOUS	0x20		/* don't use a file */
#ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED
# define MAP_UNINITIALIZED 0x4000000	/* For anonymous mmap, memory could be uninitialized */
#else
# define MAP_UNINITIALIZED 0x0		/* Don't support this flag */
#endif

/* 0x0100 - 0x80000 flags are defined in asm-generic/mman.h */
#define MAP_FIXED_NOREPLACE	0x100000	/* MAP_FIXED which doesn't unmap underlying mapping */

/*
 * Flags for mlock
 */
#define MLOCK_ONFAULT	0x01		/* Lock pages in range after they are faulted in, do not prefault */

#define MS_ASYNC	1		/* sync memory asynchronously */
#define MS_INVALIDATE	2		/* invalidate the caches */
#define MS_SYNC		4		/* synchronous memory sync */

#define MADV_NORMAL	0		/* no further special treatment */
#define MADV_RANDOM	1		/* expect random page references */
#define MADV_SEQUENTIAL	2		/* expect sequential page references */
#define MADV_WILLNEED	3		/* will need these pages */
#define MADV_DONTNEED	4		/* don't need these pages */

/* common parameters: try to keep these consistent across architectures */
#define MADV_FREE	8		/* free pages only if memory pressure */
#define MADV_REMOVE	9		/* remove these pages & resources */
#define MADV_DONTFORK	10		/* don't inherit across fork */
#define MADV_DOFORK	11		/* do inherit across fork */
#define MADV_HWPOISON	100		/* poison a page for testing */
#define MADV_SOFT_OFFLINE 101		/* soft offline page for testing */

#define MADV_MERGEABLE   12		/* KSM may merge identical pages */
#define MADV_UNMERGEABLE 13		/* KSM may not merge identical pages */

#define MADV_HUGEPAGE	14		/* Worth backing with hugepages */
#define MADV_NOHUGEPAGE	15		/* Not worth backing with hugepages */

#define MADV_DONTDUMP   16		/* Explicitly exclude from the core dump,
					   overrides the coredump filter bits */
#define MADV_DODUMP	17		/* Clear the MADV_DONTDUMP flag */

#define MADV_WIPEONFORK 18		/* Zero memory on fork, child only */
#define MADV_KEEPONFORK 19		/* Undo MADV_WIPEONFORK */

/* compatibility flags */
#define MAP_FILE	0

#define PKEY_DISABLE_ACCESS	0x1
#define PKEY_DISABLE_WRITE	0x2
#define PKEY_ACCESS_MASK	(PKEY_DISABLE_ACCESS |\
				 PKEY_DISABLE_WRITE)

#endif /* __ASM_GENERIC_MMAN_COMMON_H */