mirror of https://github.com/torvalds/linux.git (synced 2024-11-24 21:21:41 +00:00)
662df3e5c3
Implement a new lightweight guard page feature, that is, regions of userland virtual memory that, when accessed, cause a fatal signal to arise.

Currently users must establish PROT_NONE ranges to achieve this. However this is very costly memory-wise - we need a VMA for each and every one of these regions AND they become unmergeable with surrounding VMAs. In addition, repeated mmap() calls require repeated kernel context switches and contention of the mmap lock to install these ranges, potentially also having to unmap memory if installed over existing ranges.

The lightweight guard approach eliminates the VMA cost altogether - rather than establishing a PROT_NONE VMA, it operates at the level of page table entries - installing PTE markers such that accesses to them cause a fault followed by a SIGSEGV signal being raised. This is achieved through the PTE marker mechanism, which we have extended to provide PTE_MARKER_GUARD; the markers are installed via the generic page walking logic, which we have also extended for this purpose.

These guard ranges are established with MADV_GUARD_INSTALL. If the range in which they are installed contains any existing mappings, those mappings will be zapped, i.e. the range is freed and the memory unmapped (thus mimicking the behaviour of MADV_DONTNEED in this respect). Any existing guard entries will be left untouched; there is therefore no nesting of guarded pages.

Guarded ranges are NOT cleared by MADV_DONTNEED nor MADV_FREE (in both instances the memory range may be reused, at which point a user would expect guards to still be in place), but they are cleared via MADV_GUARD_REMOVE, process teardown or unmapping of memory ranges.

The guard property can be removed from ranges via MADV_GUARD_REMOVE. Should the ranges over which this is applied contain non-guard entries, those entries are left untouched, with only guard entries being cleared.

We permit this operation on anonymous memory only, and only on VMAs which are non-special, non-huge and not mlock()'d (if we permitted this we'd have to drop locked pages, which would be rather counterintuitive).

Racing page faults can cause repeated attempts to install guard pages to be interrupted, resulting in a zap, and this process can end up being repeated. If this happens more than would be expected in normal operation, we rescind locks and retry the whole thing, which avoids lock contention in this scenario.

Link: https://lkml.kernel.org/r/6aafb5821bf209f277dfae0787abb2ef87a37542.1730123433.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Suggested-by: Jann Horn <jannh@google.com>
Suggested-by: David Hildenbrand <david@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Arnd Bergmann <arnd@kernel.org>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Chris Zankel <chris@zankel.net>
Cc: Helge Deller <deller@gmx.de>
Cc: James E.J. Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Jeff Xu <jeffxu@chromium.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Liam R. Howlett <Liam.Howlett@Oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Richard Henderson <richard.henderson@linaro.org>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Shuah Khan <skhan@linuxfoundation.org>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
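To make the semantics above concrete, here is a minimal userspace sketch (not part of this commit) that guards the middle page of a three-page anonymous mapping and then removes the guard. It assumes a kernel carrying this series; the fallback #defines simply mirror the values in the header below, for userspace headers that do not yet carry them.

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Fallback definitions for older userspace headers; values mirror
     * the header below. */
    #ifndef MADV_GUARD_INSTALL
    #define MADV_GUARD_INSTALL 102
    #define MADV_GUARD_REMOVE  103
    #endif

    int main(void)
    {
            long page = sysconf(_SC_PAGESIZE);

            /* Three anonymous pages: usable | guard | usable. */
            char *buf = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (buf == MAP_FAILED)
                    return 1;

            /* Guard the middle page. Unlike an mprotect(PROT_NONE)
             * hole, this installs PTE markers and creates no extra VMA. */
            if (madvise(buf + page, page, MADV_GUARD_INSTALL)) {
                    perror("MADV_GUARD_INSTALL");
                    return 1;
            }

            buf[0] = 'a';           /* fine */
            buf[2 * page] = 'b';    /* fine */
            /* buf[page] = 'c';        would raise SIGSEGV */

            /* Remove the guard; the page becomes usable again. */
            if (madvise(buf + page, page, MADV_GUARD_REMOVE)) {
                    perror("MADV_GUARD_REMOVE");
                    return 1;
            }
            buf[page] = 'c';        /* now fine */

            munmap(buf, 3 * page);
            return 0;
    }

Note that, as described above, installing a guard zaps any existing mappings in the given range, so guarding a populated page discards its contents, just as MADV_DONTNEED would.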
94 lines
3.8 KiB
C
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __ASM_GENERIC_MMAN_COMMON_H
#define __ASM_GENERIC_MMAN_COMMON_H

/*
 Author: Michael S. Tsirkin <mst@mellanox.co.il>, Mellanox Technologies Ltd.
 Based on: asm-xxx/mman.h
*/

#define PROT_READ       0x1             /* page can be read */
#define PROT_WRITE      0x2             /* page can be written */
#define PROT_EXEC       0x4             /* page can be executed */
#define PROT_SEM        0x8             /* page may be used for atomic ops */
/* 0x10 reserved for arch-specific use */
/* 0x20 reserved for arch-specific use */
#define PROT_NONE       0x0             /* page can not be accessed */
#define PROT_GROWSDOWN  0x01000000      /* mprotect flag: extend change to start of growsdown vma */
#define PROT_GROWSUP    0x02000000      /* mprotect flag: extend change to end of growsup vma */

/* 0x01 - 0x03 are defined in linux/mman.h */
#define MAP_TYPE        0x0f            /* Mask for type of mapping */
#define MAP_FIXED       0x10            /* Interpret addr exactly */
#define MAP_ANONYMOUS   0x20            /* don't use a file */

/* 0x0100 - 0x4000 flags are defined in asm-generic/mman.h */
#define MAP_POPULATE            0x008000        /* populate (prefault) pagetables */
#define MAP_NONBLOCK            0x010000        /* do not block on IO */
#define MAP_STACK               0x020000        /* give out an address that is best suited for process/thread stacks */
#define MAP_HUGETLB             0x040000        /* create a huge page mapping */
#define MAP_SYNC                0x080000        /* perform synchronous page faults for the mapping */
#define MAP_FIXED_NOREPLACE     0x100000        /* MAP_FIXED which doesn't unmap underlying mapping */

#define MAP_UNINITIALIZED 0x4000000     /* For anonymous mmap, memory could be
                                         * uninitialized */

/*
 * Flags for mlock
 */
#define MLOCK_ONFAULT   0x01            /* Lock pages in range after they are faulted in, do not prefault */

#define MS_ASYNC        1               /* sync memory asynchronously */
#define MS_INVALIDATE   2               /* invalidate the caches */
#define MS_SYNC         4               /* synchronous memory sync */

#define MADV_NORMAL     0               /* no further special treatment */
#define MADV_RANDOM     1               /* expect random page references */
#define MADV_SEQUENTIAL 2               /* expect sequential page references */
#define MADV_WILLNEED   3               /* will need these pages */
#define MADV_DONTNEED   4               /* don't need these pages */

/* common parameters: try to keep these consistent across architectures */
#define MADV_FREE       8               /* free pages only if memory pressure */
#define MADV_REMOVE     9               /* remove these pages & resources */
#define MADV_DONTFORK   10              /* don't inherit across fork */
#define MADV_DOFORK     11              /* do inherit across fork */
#define MADV_HWPOISON   100             /* poison a page for testing */
#define MADV_SOFT_OFFLINE 101           /* soft offline page for testing */

#define MADV_MERGEABLE   12             /* KSM may merge identical pages */
#define MADV_UNMERGEABLE 13             /* KSM may not merge identical pages */

#define MADV_HUGEPAGE   14              /* Worth backing with hugepages */
#define MADV_NOHUGEPAGE 15              /* Not worth backing with hugepages */

#define MADV_DONTDUMP   16              /* Explicitly exclude from the core dump,
                                           overrides the coredump filter bits */
#define MADV_DODUMP     17              /* Clear the MADV_DONTDUMP flag */

#define MADV_WIPEONFORK 18              /* Zero memory on fork, child only */
#define MADV_KEEPONFORK 19              /* Undo MADV_WIPEONFORK */

#define MADV_COLD       20              /* deactivate these pages */
#define MADV_PAGEOUT    21              /* reclaim these pages */

#define MADV_POPULATE_READ      22      /* populate (prefault) page tables readable */
#define MADV_POPULATE_WRITE     23      /* populate (prefault) page tables writable */

#define MADV_DONTNEED_LOCKED    24      /* like DONTNEED, but drop locked pages too */

#define MADV_COLLAPSE   25              /* Synchronous hugepage collapse */

#define MADV_GUARD_INSTALL 102          /* fatal signal on access to range */
#define MADV_GUARD_REMOVE 103           /* unguard range */

/* compatibility flags */
#define MAP_FILE        0

#define PKEY_DISABLE_ACCESS     0x1
#define PKEY_DISABLE_WRITE      0x2
#define PKEY_ACCESS_MASK        (PKEY_DISABLE_ACCESS |\
                                 PKEY_DISABLE_WRITE)

#endif /* __ASM_GENERIC_MMAN_COMMON_H */