# SPDX-License-Identifier: GPL-2.0-only

menu "Memory Management options"

#
# For some reason microblaze and nios2 hard code SWAP=n. Hopefully we can
# add proper SWAP support to them, in which case this can be removed.
#
config ARCH_NO_SWAP
	bool

config ZPOOL
	bool

menuconfig SWAP
	bool "Support for paging of anonymous memory (swap)"
	depends on MMU && BLOCK && !ARCH_NO_SWAP
	default y
	help
	  This option allows you to choose whether you want to have support
	  for so called swap devices or swap files in your kernel that are
	  used to provide more virtual memory than the actual RAM present
	  in your computer. If unsure say Y.

config ZSWAP
	bool "Compressed cache for swap pages (EXPERIMENTAL)"
	depends on SWAP
	select FRONTSWAP
	select CRYPTO
	select ZPOOL
	help
	  A lightweight compressed cache for swap pages. It takes
	  pages that are in the process of being swapped out and attempts to
	  compress them into a dynamically allocated RAM-based memory pool.
	  This can result in a significant I/O reduction on swap device and,
	  in the case where decompressing from RAM is faster than swap device
	  reads, can also improve workload performance.

	  This is marked experimental because it is a new feature (as of
	  v3.11) that interacts heavily with memory reclaim. While these
	  interactions don't cause any known issues on simple memory setups,
	  they have not been fully explored on the large set of potential
	  configurations and workloads that exist.

config ZSWAP_DEFAULT_ON
	bool "Enable the compressed cache for swap pages by default"
	depends on ZSWAP
	help
	  If selected, the compressed cache for swap pages will be enabled
	  at boot, otherwise it will be disabled.

	  The selection made here can be overridden by using the kernel
	  command line 'zswap.enabled=' option.
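# Example (illustrative comment only): whatever default is chosen above,
# zswap can be toggled at boot with "zswap.enabled=1" on the kernel command
# line, or at runtime through its module parameter in sysfs:
#   echo 1 > /sys/module/zswap/parameters/enabled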

choice
	prompt "Default compressor"
	depends on ZSWAP
	default ZSWAP_COMPRESSOR_DEFAULT_LZO
	help
	  Selects the default compression algorithm for the compressed cache
	  for swap pages.

	  For an overview of what kind of performance can be expected from
	  a particular compression algorithm please refer to the benchmarks
	  available at the following LWN page:
	  https://lwn.net/Articles/751795/

	  If in doubt, select 'LZO'.

	  The selection made here can be overridden by using the kernel
	  command line 'zswap.compressor=' option.

config ZSWAP_COMPRESSOR_DEFAULT_DEFLATE
	bool "Deflate"
	select CRYPTO_DEFLATE
	help
	  Use the Deflate algorithm as the default compression algorithm.

config ZSWAP_COMPRESSOR_DEFAULT_LZO
	bool "LZO"
	select CRYPTO_LZO
	help
	  Use the LZO algorithm as the default compression algorithm.

config ZSWAP_COMPRESSOR_DEFAULT_842
	bool "842"
	select CRYPTO_842
	help
	  Use the 842 algorithm as the default compression algorithm.

config ZSWAP_COMPRESSOR_DEFAULT_LZ4
	bool "LZ4"
	select CRYPTO_LZ4
	help
	  Use the LZ4 algorithm as the default compression algorithm.

config ZSWAP_COMPRESSOR_DEFAULT_LZ4HC
	bool "LZ4HC"
	select CRYPTO_LZ4HC
	help
	  Use the LZ4HC algorithm as the default compression algorithm.

config ZSWAP_COMPRESSOR_DEFAULT_ZSTD
	bool "zstd"
	select CRYPTO_ZSTD
	help
	  Use the zstd algorithm as the default compression algorithm.
endchoice

config ZSWAP_COMPRESSOR_DEFAULT
	string
	depends on ZSWAP
	default "deflate" if ZSWAP_COMPRESSOR_DEFAULT_DEFLATE
	default "lzo" if ZSWAP_COMPRESSOR_DEFAULT_LZO
	default "842" if ZSWAP_COMPRESSOR_DEFAULT_842
	default "lz4" if ZSWAP_COMPRESSOR_DEFAULT_LZ4
	default "lz4hc" if ZSWAP_COMPRESSOR_DEFAULT_LZ4HC
	default "zstd" if ZSWAP_COMPRESSOR_DEFAULT_ZSTD
	default ""

choice
	prompt "Default allocator"
	depends on ZSWAP
	default ZSWAP_ZPOOL_DEFAULT_ZBUD
	help
	  Selects the default allocator for the compressed cache for
	  swap pages.
	  The default is 'zbud' for compatibility, however please do
	  read the description of each of the allocators below before
	  making the right choice.

	  The selection made here can be overridden by using the kernel
	  command line 'zswap.zpool=' option.

config ZSWAP_ZPOOL_DEFAULT_ZBUD
	bool "zbud"
	select ZBUD
	help
	  Use the zbud allocator as the default allocator.

config ZSWAP_ZPOOL_DEFAULT_Z3FOLD
	bool "z3fold"
	select Z3FOLD
	help
	  Use the z3fold allocator as the default allocator.

config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
	bool "zsmalloc"
	select ZSMALLOC
	help
	  Use the zsmalloc allocator as the default allocator.
endchoice

config ZSWAP_ZPOOL_DEFAULT
	string
	depends on ZSWAP
	default "zbud" if ZSWAP_ZPOOL_DEFAULT_ZBUD
	default "z3fold" if ZSWAP_ZPOOL_DEFAULT_Z3FOLD
	default "zsmalloc" if ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
	default ""
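# Example (illustrative only): the compressor and zpool defaults above only
# seed the zswap module parameters; assuming the chosen algorithm and
# allocator are built in, they can be overridden at boot
# ("zswap.compressor=zstd zswap.zpool=zsmalloc") or at runtime via sysfs:
#   echo zstd > /sys/module/zswap/parameters/compressor
#   echo zsmalloc > /sys/module/zswap/parameters/zpool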

config ZBUD
	tristate "2:1 compression allocator (zbud)"
	depends on ZSWAP
	help
	  A special purpose allocator for storing compressed pages.
	  It is designed to store up to two compressed pages per physical
	  page. While this design limits storage density, it has simple and
	  deterministic reclaim properties that make it preferable to a higher
	  density approach when reclaim will be used.

config Z3FOLD
	tristate "3:1 compression allocator (z3fold)"
	depends on ZSWAP
	help
	  A special purpose allocator for storing compressed pages.
	  It is designed to store up to three compressed pages per physical
	  page. It is a ZBUD derivative so the simplicity and determinism are
	  still there.

config ZSMALLOC
	tristate
	prompt "N:1 compression allocator (zsmalloc)" if ZSWAP
	depends on MMU
	help
	  zsmalloc is a slab-based memory allocator designed to store
	  pages of various compression levels efficiently. It achieves
	  the highest storage density with the least amount of fragmentation.

config ZSMALLOC_STAT
	bool "Export zsmalloc statistics"
	depends on ZSMALLOC
	select DEBUG_FS
	help
	  This option enables code in zsmalloc to collect various
	  statistics about what's happening in zsmalloc and exports that
	  information to userspace via debugfs.
	  If unsure, say N.
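# Example (debugfs path assumed, for illustration): with ZSMALLOC_STAT
# enabled, the per-class statistics of a pool can be read through debugfs,
# e.g. for a zram-backed pool:
#   cat /sys/kernel/debug/zsmalloc/zram0/classes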
menu "SLAB allocator options"
|
|
|
|
|
2022-05-19 21:08:53 +00:00
|
|
|
choice
|
|
|
|
prompt "Choose SLAB allocator"
|
|
|
|
default SLUB
|
|
|
|
help
|
|
|
|
This option allows to select a slab allocator.
|
|
|
|
|
|
|
|

config SLAB
	bool "SLAB"
	depends on !PREEMPT_RT
	select HAVE_HARDENED_USERCOPY_ALLOCATOR
	help
	  The regular slab allocator that is established and known to work
	  well in all environments. It organizes cache hot objects in
	  per cpu and per node queues.

config SLUB
	bool "SLUB (Unqueued Allocator)"
	select HAVE_HARDENED_USERCOPY_ALLOCATOR
	help
	  SLUB is a slab allocator that minimizes cache line usage
	  instead of managing queues of cached objects (SLAB approach).
	  Per cpu caching is realized using slabs of objects instead
	  of queues of objects. SLUB can use memory efficiently
	  and has enhanced diagnostics. SLUB is the default choice for
	  a slab allocator.

config SLOB
	depends on EXPERT
	bool "SLOB (Simple Allocator)"
	depends on !PREEMPT_RT
	help
	  SLOB replaces the stock allocator with a drastically simpler
	  allocator. SLOB is generally more space efficient but
	  does not perform as well on large systems.

endchoice

config SLAB_MERGE_DEFAULT
	bool "Allow slab caches to be merged"
	default y
	depends on SLAB || SLUB
	help
	  For reduced kernel memory fragmentation, slab caches can be
	  merged when they share the same size and other characteristics.
	  This carries a risk of kernel heap overflows being able to
	  overwrite objects from merged caches (and more easily control
	  cache layout), which makes such heap attacks easier to exploit
	  by attackers. By keeping caches unmerged, these kinds of exploits
	  can usually only damage objects in the same cache. To disable
	  merging at runtime, "slab_nomerge" can be passed on the kernel
	  command line.
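# Example (illustrative): with SLAB_MERGE_DEFAULT=y, merging can still be
# disabled for a single boot by appending "slab_nomerge" to the kernel
# command line; conversely, "slab_merge" (assumed to be honoured by the
# chosen allocator) re-enables it when the default here is N.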

config SLAB_FREELIST_RANDOM
	bool "Randomize slab freelist"
	depends on SLAB || SLUB
	help
	  Randomizes the freelist order used on creating new pages. This
	  security feature reduces the predictability of the kernel slab
	  allocator against heap overflows.

config SLAB_FREELIST_HARDENED
	bool "Harden slab freelist metadata"
	depends on SLAB || SLUB
	help
	  Many kernel heap attacks try to target slab cache metadata and
	  other infrastructure. This option makes minor performance
	  sacrifices to harden the kernel slab allocator against common
	  freelist exploit methods. Some slab implementations have more
	  sanity-checking than others. This option is most effective with
	  CONFIG_SLUB.

config SLUB_STATS
	default n
	bool "Enable SLUB performance statistics"
	depends on SLUB && SYSFS
	help
	  SLUB statistics are useful to debug SLUB's allocation behavior in
	  order to find ways to optimize the allocator. This should never be
	  enabled for production use since keeping statistics slows down
	  the allocator by a few percentage points. The slabinfo command
	  supports the determination of the most active slabs to figure
	  out which slabs are relevant to a particular load.
	  Try running: slabinfo -DA
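# Example (illustrative; tool and sysfs paths assumed):
#   slabinfo -DA                                      # most actively used caches
#   cat /sys/kernel/slab/kmalloc-64/alloc_fastpath    # raw per-cache counter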

config SLUB_CPU_PARTIAL
	default y
	depends on SLUB && SMP
	bool "SLUB per cpu partial cache"
	help
	  Per cpu partial caches accelerate object allocation and freeing
	  that is local to a processor at the price of more indeterminism
	  in the latency of the free. On overflow these caches will be cleared
	  which requires the taking of locks that may cause latency spikes.
	  Typically one would choose no for a realtime system.

endmenu # SLAB allocator options

config SHUFFLE_PAGE_ALLOCATOR
	bool "Page allocator randomization"
	default SLAB_FREELIST_RANDOM && ACPI_NUMA
	help
	  Randomization of the page allocator improves the average
	  utilization of a direct-mapped memory-side-cache. See section
	  5.2.27 Heterogeneous Memory Attribute Table (HMAT) in the ACPI
	  6.2a specification for an example of how a platform advertises
	  the presence of a memory-side-cache. There are also incidental
	  security benefits as it reduces the predictability of page
	  allocations to complement SLAB_FREELIST_RANDOM, but the
	  default granularity of shuffling on the "MAX_ORDER - 1" i.e.,
	  10th order of pages is selected based on cache utilization
	  benefits on x86.

	  While the randomization improves cache utilization it may
	  negatively impact workloads on platforms without a cache. For
	  this reason, by default, the randomization is enabled only
	  after runtime detection of a direct-mapped memory-side-cache.
	  Otherwise, the randomization may be force enabled with the
	  'page_alloc.shuffle' kernel command line parameter.

	  Say Y if unsure.
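# Example (illustrative; sysfs path assumed): shuffling can be forced on
# regardless of memory-side-cache detection by booting with
# "page_alloc.shuffle=1", and its current state inspected at runtime via:
#   cat /sys/module/page_alloc/parameters/shuffle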

config COMPAT_BRK
	bool "Disable heap randomization"
	default y
	help
	  Randomizing heap placement makes heap exploits harder, but it
	  also breaks ancient binaries (including anything libc5 based).
	  This option changes the bootup default to heap randomization
	  disabled, and can be overridden at runtime by setting
	  /proc/sys/kernel/randomize_va_space to 2.

	  On non-ancient distros (post-2000 ones) N is usually a safe choice.
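# Example (illustrative): the boot-time default set here can be flipped on a
# running system without rebuilding, e.g.:
#   echo 2 > /proc/sys/kernel/randomize_va_space   # full randomization
#   echo 0 > /proc/sys/kernel/randomize_va_space   # disable (legacy binaries)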

config MMAP_ALLOW_UNINITIALIZED
	bool "Allow mmapped anonymous memory to be uninitialized"
	depends on EXPERT && !MMU
	default n
	help
	  Normally, and according to the Linux spec, anonymous memory obtained
	  from mmap() has its contents cleared before it is passed to
	  userspace. Enabling this config option allows you to request that
	  mmap() skip that if it is given an MAP_UNINITIALIZED flag, thus
	  providing a huge performance boost. If this option is not enabled,
	  then the flag will be ignored.

	  This is taken advantage of by uClibc's malloc(), and also by
	  ELF-FDPIC binfmt's brk and stack allocator.

	  Because of the obvious security issues, this option should only be
	  enabled on embedded devices where you control what is run in
	  userspace. Since that isn't generally a problem on no-MMU systems,
	  it is normally safe to say Y here.

	  See Documentation/admin-guide/mm/nommu-mmap.rst for more information.
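# Example (userspace sketch, illustration only; assumes the no-MMU toolchain
# exposes MAP_UNINITIALIZED through its mman headers):
#   /* Request anon memory without the usual zero-fill; contents are stale. */
#   void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
#                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);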

config SELECT_MEMORY_MODEL
	def_bool y
	depends on ARCH_SELECT_MEMORY_MODEL

choice
	prompt "Memory model"
	depends on SELECT_MEMORY_MODEL
	default SPARSEMEM_MANUAL if ARCH_SPARSEMEM_DEFAULT
	default FLATMEM_MANUAL
	help
	  This option allows you to change some of the ways that
	  Linux manages its memory internally. Most users will
	  only have one option here selected by the architecture
	  configuration. This is normal.

config FLATMEM_MANUAL
	bool "Flat Memory"
	depends on !ARCH_SPARSEMEM_ENABLE || ARCH_FLATMEM_ENABLE
	help
	  This option is best suited for non-NUMA systems with
	  flat address space. The FLATMEM is the most efficient
	  system in terms of performance and resource consumption
	  and it is the best option for smaller systems.

	  For systems that have holes in their physical address
	  spaces and for features like NUMA and memory hotplug,
	  choose "Sparse Memory".

	  If unsure, choose this option (Flat Memory) over any other.

config SPARSEMEM_MANUAL
	bool "Sparse Memory"
	depends on ARCH_SPARSEMEM_ENABLE
	help
	  This will be the only option for some systems, including
	  memory hot-plug systems. This is normal.

	  This option provides efficient support for systems with
	  holes in their physical address space and allows memory
	  hot-plug and hot-remove.

	  If unsure, choose "Flat Memory" over this option.
endchoice

config SPARSEMEM
	def_bool y
	depends on (!SELECT_MEMORY_MODEL && ARCH_SPARSEMEM_ENABLE) || SPARSEMEM_MANUAL

config FLATMEM
	def_bool y
	depends on !SPARSEMEM || FLATMEM_MANUAL

#
# SPARSEMEM_EXTREME (which is the default) does some bootmem
# allocations when sparse_init() is called. If this cannot
# be done on your architecture, select this option. However,
# statically allocating the mem_section[] array can potentially
# consume vast quantities of .bss, so be careful.
#
# This option will also potentially produce smaller runtime code
# with gcc 3.4 and later.
#
config SPARSEMEM_STATIC
	bool

#
# Architecture platforms which require a two level mem_section in SPARSEMEM
# must select this option. This is usually for architecture platforms with
# an extremely sparse physical address space.
#
config SPARSEMEM_EXTREME
	def_bool y
	depends on SPARSEMEM && !SPARSEMEM_STATIC

config SPARSEMEM_VMEMMAP_ENABLE
	bool

config SPARSEMEM_VMEMMAP
	bool "Sparse Memory virtual memmap"
	depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE
	default y
	help
	  SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise
	  pfn_to_page and page_to_pfn operations. This is the most
	  efficient option when sufficient kernel resources are available.
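# Example (illustrative only; the config name below is hypothetical): an
# architecture opts into these memory models from its own Kconfig, typically
# with something like:
#   config EXAMPLE_ARCH
#           select ARCH_SPARSEMEM_ENABLE
#           select ARCH_SPARSEMEM_DEFAULT
#           select SPARSEMEM_VMEMMAP_ENABLE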

config HAVE_MEMBLOCK_PHYS_MAP
	bool

config HAVE_FAST_GUP
	depends on MMU
	bool

# Don't discard allocated memory used to track "memory" and "reserved" memblocks
# after early boot, so it can still be used to test for validity of memory.
# Also, memblocks are updated with memory hot(un)plug.
config ARCH_KEEP_MEMBLOCK
	bool

# Keep arch NUMA mapping infrastructure post-init.
config NUMA_KEEP_MEMINFO
	bool

config MEMORY_ISOLATION
	bool

# IORESOURCE_SYSTEM_RAM regions in the kernel resource tree that are marked
# IORESOURCE_EXCLUSIVE cannot be mapped to user space, for example, via
# /dev/mem.
config EXCLUSIVE_SYSTEM_RAM
	def_bool y
	depends on !DEVMEM || STRICT_DEVMEM

#
# Only set on architectures that have completely implemented the memory
# hotplug feature. If you are not sure, don't touch it.
#
config HAVE_BOOTMEM_INFO_NODE
	def_bool n

config ARCH_ENABLE_MEMORY_HOTPLUG
	bool

config ARCH_ENABLE_MEMORY_HOTREMOVE
	bool

# eventually, we can have this option just 'select SPARSEMEM'
menuconfig MEMORY_HOTPLUG
	bool "Memory hotplug"
	select MEMORY_ISOLATION
	depends on SPARSEMEM
	depends on ARCH_ENABLE_MEMORY_HOTPLUG
	depends on 64BIT
	select NUMA_KEEP_MEMINFO if NUMA

if MEMORY_HOTPLUG

config MEMORY_HOTPLUG_DEFAULT_ONLINE
	bool "Online the newly added memory blocks by default"
	depends on MEMORY_HOTPLUG
	help
	  This option sets the default for the memory hotplug onlining
	  policy (/sys/devices/system/memory/auto_online_blocks), which
	  determines what happens to newly added memory regions. The
	  policy setting can always be changed at runtime.
	  See Documentation/admin-guide/mm/memory-hotplug.rst for more information.

	  Say Y here if you want all hot-plugged memory blocks to appear in
	  'online' state by default.
	  Say N here if you want the default policy to keep all hot-plugged
	  memory blocks in 'offline' state.
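# Example (illustrative): the runtime policy can be inspected or changed via
# sysfs regardless of the default selected here, e.g.:
#   cat /sys/devices/system/memory/auto_online_blocks
#   echo online > /sys/devices/system/memory/auto_online_blocks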

config MEMORY_HOTREMOVE
	bool "Allow for memory hot remove"
	select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64)
	depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
	depends on MIGRATION

config MHP_MEMMAP_ON_MEMORY
	def_bool y
	depends on MEMORY_HOTPLUG && SPARSEMEM_VMEMMAP
	depends on ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE

endif # MEMORY_HOTPLUG
# Heavily threaded applications may benefit from splitting the mm-wide
|
|
|
|
# page_table_lock, so that faults on different parts of the user address
|
|
|
|
# space can be handled with less contention: split it at this NR_CPUS.
|
|
|
|
# Default to 4 for wider testing, though 8 might be more appropriate.
|
|
|
|
# ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
|
2005-11-23 21:37:37 +00:00
|
|
|
# PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
|
2020-05-26 17:33:01 +00:00
|
|
|
# SPARC32 allocates multiple pte tables within a single page, and therefore
|
|
|
|
# a per-page lock leads to problems when multiple tables need to be locked
|
|
|
|
# at the same time (e.g. copy_page_range()).
|
2009-12-15 01:59:02 +00:00
|
|
|
# DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page.
|
2005-10-30 01:16:40 +00:00
|
|
|
#
|
|
|
|
config SPLIT_PTLOCK_CPUS
|
|
|
|
int
|
2014-04-07 22:37:14 +00:00
|
|
|
default "999999" if !MMU
|
2009-12-15 01:59:02 +00:00
|
|
|
default "999999" if ARM && !CPU_CACHE_VIPT
|
|
|
|
default "999999" if PARISC && !PA20
|
2020-05-26 17:33:01 +00:00
|
|
|
default "999999" if SPARC32
|
2005-10-30 01:16:40 +00:00
|
|
|
default "4"
|
2006-01-08 09:00:49 +00:00
|
|
|
|
2013-11-14 22:31:07 +00:00
|
|
|
config ARCH_ENABLE_SPLIT_PMD_PTLOCK
|
2014-12-20 20:41:11 +00:00
|
|
|
bool
|
2013-11-14 22:31:07 +00:00
|
|
|
|
2014-10-09 22:29:32 +00:00
|
|
|
#
|
|
|
|
# support for memory balloon
|
|
|
|
config MEMORY_BALLOON
|
2014-12-20 20:41:11 +00:00
|
|
|
bool
|
2014-10-09 22:29:32 +00:00
|
|
|
|
2012-12-12 00:02:38 +00:00
|
|
|
#
|
|
|
|
# support for memory balloon compaction
|
|
|
|
config BALLOON_COMPACTION
|
|
|
|
bool "Allow for balloon memory compaction/migration"
|
|
|
|
def_bool y
|
2014-10-09 22:29:32 +00:00
|
|
|
depends on COMPACTION && MEMORY_BALLOON
|
2012-12-12 00:02:38 +00:00
|
|
|
help
|
|
|
|
Memory fragmentation introduced by ballooning might significantly
|
|
|
|
reduce the number of 2MB contiguous memory blocks that can be
|
|
|
|
used within a guest, thus imposing performance penalties associated
|
|
|
|
with the reduced number of transparent huge pages that could be used
|
|
|
|
by the guest workload. Allowing the compaction & migration for memory
|
|
|
|
pages enlisted as being part of memory balloon devices avoids the
|
|
|
|
aforementioned scenario and helps improve memory defragmentation.
|
|
|
|
|
2010-05-24 21:32:21 +00:00
|
|
|
#
|
|
|
|
# support for memory compaction
|
|
|
|
config COMPACTION
|
|
|
|
bool "Allow for memory compaction"
|
2012-10-08 23:33:03 +00:00
|
|
|
def_bool y
|
2010-05-24 21:32:21 +00:00
|
|
|
select MIGRATION
|
2011-01-25 23:07:25 +00:00
|
|
|
depends on MMU
|
2010-05-24 21:32:21 +00:00
|
|
|
help
|
2019-12-01 01:58:23 +00:00
|
|
|
Compaction is the only memory management component to form
|
|
|
|
high order (larger physically contiguous) memory blocks
|
|
|
|
reliably. The page allocator relies on compaction heavily and
|
|
|
|
the lack of the feature can lead to unexpected OOM killer
|
|
|
|
invocations for high order memory requests. You shouldn't
|
|
|
|
disable this option unless there really is a strong reason for
|
|
|
|
it and then we would be really interested to hear about that at
|
|
|
|
linux-mm@kvack.org.
|
2010-05-24 21:32:21 +00:00
|
|
|
|
2020-04-07 03:04:56 +00:00
|
|
|
#
|
|
|
|
# support for free page reporting
|
|
|
|
config PAGE_REPORTING
|
|
|
|
bool "Free page reporting"
|
|
|
|
def_bool n
|
|
|
|
help
|
|
|
|
Free page reporting allows for the incremental acquisition of
|
|
|
|
free pages from the buddy allocator for the purpose of reporting
|
|
|
|
those pages to another entity, such as a hypervisor, so that the
|
|
|
|
memory can be freed within the host for other uses.
|
|
|
|
|
2006-01-08 09:00:49 +00:00
|
|
|
#
|
|
|
|
# support for page migration
|
|
|
|
#
|
|
|
|
config MIGRATION
|
2006-03-22 08:09:12 +00:00
|
|
|
bool "Page migration"
|
2006-06-23 09:03:37 +00:00
|
|
|
def_bool y
|
2013-09-12 22:14:08 +00:00
|
|
|
depends on (NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA) && MMU
|
2006-03-22 08:09:12 +00:00
|
|
|
help
|
|
|
|
Allows the migration of the physical location of pages of processes
|
2010-05-24 21:32:21 +00:00
|
|
|
while the virtual addresses are not changed. This is useful in
|
|
|
|
two situations. The first is on NUMA systems to put pages nearer
|
|
|
|
to the processors accessing them. The second is when allocating huge
|
|
|
|
pages as migration can relocate pages to satisfy a huge page
|
|
|
|
allocation instead of reclaiming.
|
2006-06-13 00:11:31 +00:00
|
|
|
|
2022-02-16 04:31:38 +00:00
|
|
|
config DEVICE_MIGRATION
|
2022-02-16 04:31:38 +00:00
|
|
|
def_bool MIGRATION && ZONE_DEVICE
|
2022-02-16 04:31:38 +00:00
|
|
|
|
2014-06-04 23:05:35 +00:00
|
|
|
config ARCH_ENABLE_HUGEPAGE_MIGRATION
|
2014-12-20 20:41:11 +00:00
|
|
|
bool
|
2014-06-04 23:05:35 +00:00
|
|
|
|
2017-09-08 23:10:53 +00:00
|
|
|
config ARCH_ENABLE_THP_MIGRATION
|
|
|
|
bool
|
|
|
|
|
2021-05-05 01:33:19 +00:00
|
|
|
config HUGETLB_PAGE_SIZE_VARIABLE
|
|
|
|
def_bool n
|
|
|
|
help
|
|
|
|
Allows the pageblock_order value to be dynamic instead of just standard
|
|
|
|
HUGETLB_PAGE_ORDER when there are multiple HugeTLB page sizes available
|
|
|
|
on a platform.
|
|
|
|
|
2022-03-22 21:43:20 +00:00
|
|
|
Note that the pageblock_order cannot exceed MAX_ORDER - 1 and will be
|
|
|
|
clamped down to MAX_ORDER - 1.
|
|
|
|
|
2019-05-14 00:19:00 +00:00
|
|
|
config CONTIG_ALLOC
|
2019-12-01 01:58:23 +00:00
|
|
|
def_bool (MEMORY_ISOLATION && COMPACTION) || CMA
|
2019-05-14 00:19:00 +00:00
|
|
|
|
2008-09-11 08:31:45 +00:00
|
|
|
config PHYS_ADDR_T_64BIT
|
2018-04-03 14:24:20 +00:00
|
|
|
def_bool 64BIT
|
2008-09-11 08:31:45 +00:00
|
|
|
|
2007-07-17 11:03:37 +00:00
|
|
|
config BOUNCE
|
2013-04-29 22:08:55 +00:00
|
|
|
bool "Enable bounce buffers"
|
|
|
|
default y
|
2021-03-31 07:29:59 +00:00
|
|
|
depends on BLOCK && MMU && HIGHMEM
|
2013-04-29 22:08:55 +00:00
|
|
|
help
|
2021-03-31 07:29:59 +00:00
|
|
|
Enable bounce buffers for devices that cannot access the full range of
|
|
|
|
memory available to the CPU. Enabled by default when HIGHMEM is
|
|
|
|
selected, but you may say n to override this.
|
2007-07-17 11:03:37 +00:00
|
|
|
|
mmu-notifiers: core
With KVM/GRU/XPMEM there isn't just the primary CPU MMU pointing to pages.
There are secondary MMUs (with secondary sptes and secondary tlbs) too.
sptes in the kvm case are shadow pagetables, but when I say spte in
mmu-notifier context, I mean "secondary pte". In GRU case there's no
actual secondary pte and there's only a secondary tlb because the GRU
secondary MMU has no knowledge about sptes and every secondary tlb miss
event in the MMU always generates a page fault that has to be resolved by
the CPU (this is not the case for KVM, where a secondary tlb miss will
walk sptes in hardware and it will refill the secondary tlb transparently
to software if the corresponding spte is present). The same way
zap_page_range has to invalidate the pte before freeing the page, the spte
(and secondary tlb) must also be invalidated before any page is freed and
reused.
Currently we take a page_count pin on every page mapped by sptes, but that
means the pages can't be swapped whenever they're mapped by any spte
because they're part of the guest working set. Furthermore a spte unmap
event can immediately lead to a page being freed when the pin is released
(so requiring the same complex and relatively slow tlb_gather smp safe
logic we have in zap_page_range and that can be avoided completely if the
spte unmap event doesn't require an unpin of the page previously mapped in
the secondary MMU).
The mmu notifiers allow kvm/GRU/XPMEM to attach to the tsk->mm and know
when the VM is swapping or freeing or doing anything on the primary MMU so
that the secondary MMU code can drop sptes before the pages are freed,
avoiding all page pinning and allowing 100% reliable swapping of guest
physical address space. Furthermore it spares the code that tears down the
mappings of the secondary MMU from implementing tlb_gather-like logic as in
zap_page_range, which would require many IPIs to flush other cpu tlbs for
each fixed number of sptes unmapped.
To make an example: if what happens on the primary MMU is a protection
downgrade (from writeable to wrprotect) the secondary MMU mappings will be
invalidated, and the next secondary-mmu-page-fault will call
get_user_pages and trigger a do_wp_page through get_user_pages if it
called get_user_pages with write=1, and it'll re-establish an updated
spte or secondary-tlb-mapping on the copied page. Or it will setup a
readonly spte or readonly tlb mapping if it's a guest-read, if it calls
get_user_pages with write=0. This is just an example.
This allows mapping any page pointed to by any pte (and in turn visible in the
primary CPU MMU) into a secondary MMU (be it a pure tlb like GRU, or a
full MMU with both sptes and secondary-tlb like the shadow-pagetable layer
with kvm), or a remote DMA in software like XPMEM (hence needing of
schedule in XPMEM code to send the invalidate to the remote node, while no
need to schedule in kvm/gru as it's an immediate event like invalidating
primary-mmu pte).
At least for KVM without this patch it's impossible to swap guests
reliably. And having this feature and removing the page pin allows
several other optimizations that simplify life considerably.
Dependencies:
1) mm_take_all_locks() to register the mmu notifier when the whole VM
isn't doing anything with "mm". This allows mmu notifier users to keep
track if the VM is in the middle of the invalidate_range_begin/end
critical section with an atomic counter increase in range_begin and
decreased in range_end. No secondary MMU page fault is allowed to map
any spte or secondary tlb reference, while the VM is in the middle of
range_begin/end as any page returned by get_user_pages in that critical
section could later immediately be freed without any further
->invalidate_page notification (invalidate_range_begin/end works on
ranges and ->invalidate_page isn't called immediately before freeing
the page). To stop all page freeing and pagetable overwrites the
mmap_sem must be taken in write mode and all other anon_vma/i_mmap
locks must be taken too.
2) It'd be a waste to add branches in the VM if nobody could possibly
run KVM/GRU/XPMEM on the kernel, so mmu notifiers will only be enabled if
CONFIG_KVM=m/y. In the current kernel kvm won't yet take advantage of
mmu notifiers, but this already allows compiling a KVM external module
against a kernel with mmu notifiers enabled and from the next pull from
kvm.git we'll start using them. And GRU/XPMEM will also be able to
continue the development by enabling KVM=m in their config, until they
submit all GRU/XPMEM GPLv2 code to the mainline kernel. Then they can
also enable MMU_NOTIFIERS in the same way KVM does it (even if KVM=n).
This guarantees nobody selects MMU_NOTIFIER=y if KVM and GRU and XPMEM
are all =n.
The mmu_notifier_register call can fail because mm_take_all_locks may be
interrupted by a signal and return -EINTR. Because mmu_notifier_register
is used when a driver starts up, a failure can be gracefully handled. Here is
an example of the change applied to kvm to register the mmu notifiers.
Usually when a driver starts up, other allocations are required anyway and
-ENOMEM failure paths exist already.
struct kvm *kvm_arch_create_vm(void)
{
struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
+ int err;
if (!kvm)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
+ kvm->arch.mmu_notifier.ops = &kvm_mmu_notifier_ops;
+ err = mmu_notifier_register(&kvm->arch.mmu_notifier, current->mm);
+ if (err) {
+ kfree(kvm);
+ return ERR_PTR(err);
+ }
+
return kvm;
}
mmu_notifier_unregister returns void and it's reliable.
The patch also adds a few needed but missing includes that would prevent
the kernel from compiling after these changes on non-x86 archs (x86 didn't need
them by luck).
[akpm@linux-foundation.org: coding-style fixes]
[akpm@linux-foundation.org: fix mm/filemap_xip.c build]
[akpm@linux-foundation.org: fix mm/mmu_notifier.c build]
Signed-off-by: Andrea Arcangeli <andrea@qumranet.com>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Jack Steiner <steiner@sgi.com>
Cc: Robin Holt <holt@sgi.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Kanoj Sarcar <kanojsarcar@yahoo.com>
Cc: Roland Dreier <rdreier@cisco.com>
Cc: Steve Wise <swise@opengridcomputing.com>
Cc: Avi Kivity <avi@qumranet.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Anthony Liguori <aliguori@us.ibm.com>
Cc: Chris Wright <chrisw@redhat.com>
Cc: Marcelo Tosatti <marcelo@kvack.org>
Cc: Eric Dumazet <dada1@cosmosbay.com>
Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
Cc: Izik Eidus <izike@qumranet.com>
Cc: Anthony Liguori <aliguori@us.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
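To make the range_begin/range_end accounting above concrete, here is a hedged user-space C analogue using C11 atomics; it is not the kernel mmu_notifier API, the function names are invented for the sketch, and a real implementation also needs the locking described in the patch to close the race between the check and installing the mapping.
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int active_invalidations;

static void invalidate_range_begin(void) { atomic_fetch_add(&active_invalidations, 1); }
static void invalidate_range_end(void)   { atomic_fetch_sub(&active_invalidations, 1); }

/* secondary MMU fault path: only map while no invalidation is running */
static bool try_establish_secondary_mapping(void)
{
	if (atomic_load(&active_invalidations) != 0)
		return false;		/* caller retries after range_end */
	/* ... install the secondary pte / tlb entry here ... */
	return true;
}

int main(void)
{
	invalidate_range_begin();
	printf("mapping during invalidate allowed: %d\n", try_establish_secondary_mapping());
	invalidate_range_end();
	printf("mapping after invalidate allowed:  %d\n", try_establish_secondary_mapping());
	return 0;
}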
2008-07-28 22:46:29 +00:00
|
|
|
config MMU_NOTIFIER
|
|
|
|
bool
|
2014-12-05 16:24:45 +00:00
|
|
|
select SRCU
|
mm/mmu_notifier: add an interval tree notifier
Of the 13 users of mmu_notifiers, 8 of them use only
invalidate_range_start/end() and immediately intersect the
mmu_notifier_range with some kind of internal list of VAs. 4 use an
interval tree (i915_gem, radeon_mn, umem_odp, hfi1). 4 use a linked list
of some kind (scif_dma, vhost, gntdev, hmm)
And the remaining 5 either don't use invalidate_range_start() or do some
special thing with it.
It turns out that building a correct scheme with an interval tree is
pretty complicated, particularly if the use case is synchronizing against
another thread doing get_user_pages(). Many of these implementations have
various subtle and difficult to fix races.
This approach puts the interval tree as common code at the top of the mmu
notifier call tree and implements a shareable locking scheme.
It includes:
- An interval tree tracking VA ranges, with per-range callbacks
- A read/write locking scheme for the interval tree that avoids
sleeping in the notifier path (for OOM killer)
- A sequence counter based collision-retry locking scheme to tell
device page fault that a VA range is being concurrently invalidated.
This is based on various ideas:
- hmm accumulates invalidated VA ranges and releases them when all
invalidates are done, via active_invalidate_ranges count.
This approach avoids having to intersect the interval tree twice (as
umem_odp does) at the potential cost of a longer device page fault.
- kvm/umem_odp use a sequence counter to drive the collision retry,
via invalidate_seq
- a deferred work todo list on unlock scheme like RTNL, via deferred_list.
This makes adding/removing interval tree members more deterministic
- seqlock, except this version makes the seqlock idea multi-holder on the
write side by protecting it with active_invalidate_ranges and a spinlock
To minimize MM overhead when only the interval tree is being used, the
entire SRCU and hlist overheads are dropped using some simple
branches. Similarly the interval tree overhead is dropped when in hlist
mode.
The overhead from the mandatory spinlock is broadly the same as most of
existing users which already had a lock (or two) of some sort on the
invalidation path.
Link: https://lore.kernel.org/r/20191112202231.3856-3-jgg@ziepe.ca
Acked-by: Christian König <christian.koenig@amd.com>
Tested-by: Philip Yang <Philip.Yang@amd.com>
Tested-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
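The collision-retry scheme above can be sketched in a few lines of user-space C. This is only an analogue of the idea, not the kernel interface (which exposes helpers such as mmu_interval_read_begin()/mmu_interval_read_retry()): the fault side samples a sequence number, does its work, and loops if an invalidation bumped the sequence in the meantime. The invalidation here is simulated inline so the demo terminates.
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_ulong invalidate_seq;

static unsigned long read_begin(void)     { return atomic_load(&invalidate_seq); }
static bool read_retry(unsigned long seq) { return atomic_load(&invalidate_seq) != seq; }
static void invalidate_range(void)        { atomic_fetch_add(&invalidate_seq, 1); }

int main(void)
{
	unsigned long seq;
	int attempts = 0;

	do {
		seq = read_begin();
		/* ... fault in / pin the pages for the device here ... */
		if (attempts < 2)
			invalidate_range();	/* simulate concurrent invalidations */
		attempts++;
	} while (read_retry(seq));

	printf("mapping established after %d attempt(s)\n", attempts);
	return 0;
}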
2019-11-12 20:22:19 +00:00
|
|
|
select INTERVAL_TREE
|
2009-05-06 23:03:05 +00:00
|
|
|
|
2009-09-22 00:01:57 +00:00
|
|
|
config KSM
|
|
|
|
bool "Enable KSM for page merging"
|
|
|
|
depends on MMU
|
2018-12-28 08:34:05 +00:00
|
|
|
select XXHASH
|
2009-09-22 00:01:57 +00:00
|
|
|
help
|
|
|
|
Enable Kernel Samepage Merging: KSM periodically scans those areas
|
|
|
|
of an application's address space that an app has advised may be
|
|
|
|
mergeable. When it finds pages of identical content, it replaces
|
2009-12-15 01:59:34 +00:00
|
|
|
the many instances by a single page with that content, so
|
2009-09-22 00:01:57 +00:00
|
|
|
saving memory until one or another app needs to modify the content.
|
|
|
|
Recommended for use with KVM, or with other duplicative applications.
|
2022-06-27 06:00:26 +00:00
|
|
|
See Documentation/mm/ksm.rst for more information: KSM is inactive
|
2009-10-07 23:32:22 +00:00
|
|
|
until a program has madvised that an area is MADV_MERGEABLE, and
|
|
|
|
root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
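A hedged example of the opt-in described above, assuming a Linux system with CONFIG_KSM=y (merging only actually happens once /sys/kernel/mm/ksm/run is set to 1): map anonymous memory with identical contents and advise it MADV_MERGEABLE.
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 64 * 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	memset(p, 0x5a, len);			/* identical pages: good KSM candidates */

	/* fails with EINVAL when the kernel was built without CONFIG_KSM */
	if (madvise(p, len, MADV_MERGEABLE))
		perror("madvise(MADV_MERGEABLE)");
	return 0;
}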
|
2009-09-22 00:01:57 +00:00
|
|
|
|
2009-06-03 20:04:31 +00:00
|
|
|
config DEFAULT_MMAP_MIN_ADDR
|
2019-12-01 01:58:23 +00:00
|
|
|
int "Low address space to protect from user allocation"
|
2009-12-15 19:27:45 +00:00
|
|
|
depends on MMU
|
2019-12-01 01:58:23 +00:00
|
|
|
default 4096
|
|
|
|
help
|
2009-06-03 20:04:31 +00:00
|
|
|
This is the portion of low virtual memory which should be protected
|
|
|
|
from userspace allocation. Keeping a user from writing to low pages
|
|
|
|
can help reduce the impact of kernel NULL pointer bugs.
|
|
|
|
|
|
|
|
For most ia64, ppc64 and x86 users with lots of address space
|
|
|
|
a value of 65536 is reasonable and should cause no problems.
|
|
|
|
On arm and other archs it should not be higher than 32768.
|
2009-07-31 16:54:11 +00:00
|
|
|
Programs which use vm86 functionality or have some need to map
|
|
|
|
this low address space will need CAP_SYS_RAWIO or disable this
|
|
|
|
protection by setting the value to 0.
|
2009-06-03 20:04:31 +00:00
|
|
|
|
|
|
|
This value can be changed after boot using the
|
|
|
|
/proc/sys/vm/mmap_min_addr tunable.
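As a small illustration of the runtime tunable mentioned above, the sketch below simply reads /proc/sys/vm/mmap_min_addr and prints it; the error handling and wording are incidental to the example.
#include <stdio.h>

int main(void)
{
	unsigned long min_addr;
	FILE *f = fopen("/proc/sys/vm/mmap_min_addr", "r");

	if (!f || fscanf(f, "%lu", &min_addr) != 1) {
		perror("mmap_min_addr");
		return 1;
	}
	fclose(f);

	printf("unprivileged mappings below 0x%lx are refused\n", min_addr);
	return 0;
}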
|
|
|
|
|
2009-09-26 16:35:07 +00:00
|
|
|
config ARCH_SUPPORTS_MEMORY_FAILURE
|
|
|
|
bool
|
2009-06-03 20:04:31 +00:00
|
|
|
|
2009-09-16 09:50:15 +00:00
|
|
|
config MEMORY_FAILURE
|
|
|
|
depends on MMU
|
2009-09-26 16:35:07 +00:00
|
|
|
depends on ARCH_SUPPORTS_MEMORY_FAILURE
|
2009-09-16 09:50:15 +00:00
|
|
|
bool "Enable recovery from hardware memory errors"
|
2012-07-31 23:43:50 +00:00
|
|
|
select MEMORY_ISOLATION
|
2015-06-24 23:57:36 +00:00
|
|
|
select RAS
|
2009-09-16 09:50:15 +00:00
|
|
|
help
|
|
|
|
Enables code to recover from some memory failures on systems
|
|
|
|
with MCA recovery. This allows a system to continue running
|
|
|
|
even when some of its memory has uncorrected errors. This requires
|
|
|
|
special hardware support and typically ECC memory.
|
|
|
|
|
2009-09-16 09:50:17 +00:00
|
|
|
config HWPOISON_INJECT
|
2009-12-16 11:20:00 +00:00
|
|
|
tristate "HWPoison pages injector"
|
2009-12-21 18:56:42 +00:00
|
|
|
depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
|
2009-12-16 11:19:59 +00:00
|
|
|
select PROC_PAGE_MONITOR
|
2009-09-16 09:50:17 +00:00
|
|
|
|
2009-05-06 23:03:05 +00:00
|
|
|
config NOMMU_INITIAL_TRIM_EXCESS
|
|
|
|
int "Turn on mmap() excess space trimming before booting"
|
|
|
|
depends on !MMU
|
|
|
|
default 1
|
|
|
|
help
|
|
|
|
The NOMMU mmap() frequently needs to allocate large contiguous chunks
|
|
|
|
of memory on which to store mappings, but it can only ask the system
|
|
|
|
allocator for chunks in 2^N*PAGE_SIZE amounts - which is frequently
|
|
|
|
more than it requires. To deal with this, mmap() is able to trim off
|
|
|
|
the excess and return it to the allocator.
|
|
|
|
|
|
|
|
If trimming is enabled, the excess is trimmed off and returned to the
|
|
|
|
system allocator, which can cause extra fragmentation, particularly
|
|
|
|
if there are a lot of transient processes.
|
|
|
|
|
|
|
|
If trimming is disabled, the excess is kept, but not used, which for
|
|
|
|
long-term mappings means that the space is wasted.
|
|
|
|
|
|
|
|
Trimming can be dynamically controlled through a sysctl option
|
|
|
|
(/proc/sys/vm/nr_trim_pages) which specifies the minimum number of
|
|
|
|
excess pages there must be before trimming should occur, or zero if
|
|
|
|
no trimming is to occur.
|
|
|
|
|
|
|
|
This option specifies the initial value of that sysctl. The default
|
|
|
|
of 1 says that all excess pages should be trimmed.
|
|
|
|
|
2020-08-12 09:22:30 +00:00
|
|
|
See Documentation/admin-guide/mm/nommu-mmap.rst for more information.
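The arithmetic behind this help text can be sketched as follows (a PAGE_SIZE of 4096 is an assumption for the example): the request is rounded up to a power-of-two number of pages by the allocator, and the difference between that and the pages actually needed is the excess that trimming would return.
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed page size for the example */

/* smallest power-of-two page count that covers len bytes */
static unsigned long pow2_pages(unsigned long len)
{
	unsigned long needed = (len + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned long pages = 1;

	while (pages < needed)
		pages <<= 1;
	return pages;
}

int main(void)
{
	unsigned long len = 5 * PAGE_SIZE + 123;	/* just over 5 pages */
	unsigned long needed = ((len + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
	unsigned long allocated = pow2_pages(len) * PAGE_SIZE;

	printf("requested %lu, allocated %lu, excess %lu bytes\n",
	       len, allocated, allocated - needed);
	return 0;
}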
|
2010-09-03 16:22:48 +00:00
|
|
|
|
2022-05-19 21:08:53 +00:00
|
|
|
config ARCH_WANT_GENERAL_HUGETLB
|
|
|
|
bool
|
|
|
|
|
|
|
|
config ARCH_WANTS_THP_SWAP
|
|
|
|
def_bool n
|
|
|
|
|
|
|
|
menuconfig TRANSPARENT_HUGEPAGE
|
2011-01-13 23:47:07 +00:00
|
|
|
bool "Transparent Hugepage Support"
|
2021-11-05 20:35:27 +00:00
|
|
|
depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT
|
2011-01-13 23:47:07 +00:00
|
|
|
select COMPACTION
|
2018-09-22 20:14:30 +00:00
|
|
|
select XARRAY_MULTI
|
2011-01-13 23:46:39 +00:00
|
|
|
help
|
|
|
|
Transparent Hugepages allows the kernel to use huge pages and
|
|
|
|
huge tlb transparently to the applications whenever possible.
|
|
|
|
This feature can improve computing performance to certain
|
|
|
|
applications by speeding up page faults during memory
|
|
|
|
allocation, by reducing the number of tlb misses and by speeding
|
|
|
|
up the pagetable walking.
|
|
|
|
|
|
|
|
If memory is constrained on an embedded system, you may want to say N.
|
|
|
|
|
2022-05-19 21:08:53 +00:00
|
|
|
if TRANSPARENT_HUGEPAGE
|
|
|
|
|
2011-01-13 23:47:07 +00:00
|
|
|
choice
|
|
|
|
prompt "Transparent Hugepage Support sysfs defaults"
|
|
|
|
depends on TRANSPARENT_HUGEPAGE
|
|
|
|
default TRANSPARENT_HUGEPAGE_ALWAYS
|
|
|
|
help
|
|
|
|
Selects the sysfs defaults for Transparent Hugepage Support.
|
|
|
|
|
|
|
|
config TRANSPARENT_HUGEPAGE_ALWAYS
|
|
|
|
bool "always"
|
|
|
|
help
|
|
|
|
Enabling Transparent Hugepage always can increase the
|
|
|
|
memory footprint of applications without a guaranteed
|
|
|
|
benefit but it will work automatically for all applications.
|
|
|
|
|
|
|
|
config TRANSPARENT_HUGEPAGE_MADVISE
|
|
|
|
bool "madvise"
|
|
|
|
help
|
|
|
|
Enabling Transparent Hugepage madvise will only provide a
|
|
|
|
performance improvement benefit to the applications using
|
|
|
|
madvise(MADV_HUGEPAGE) but it won't risk increasing the
|
|
|
|
memory footprint of applications without a guaranteed
|
|
|
|
benefit.
|
|
|
|
endchoice
|
|
|
|
|
mm, THP, swap: delay splitting THP during swap out
Patch series "THP swap: Delay splitting THP during swapping out", v11.
This patchset is to optimize the performance of Transparent Huge Page
(THP) swap.
Recently, the performance of the storage devices improved so fast that
we cannot saturate the disk bandwidth with a single logical CPU when doing
page swap out, even on a high-end server machine, because the
performance of the storage device improved faster than that of a single
logical CPU. And it seems that the trend will not change in the near
future. On the other hand, the THP becomes more and more popular
because of increased memory size. So it becomes necessary to optimize
THP swap performance.
The advantages of the THP swap support include:
- Batch the swap operations for the THP to reduce lock
acquiring/releasing, including allocating/freeing the swap space,
adding/deleting to/from the swap cache, and writing/reading the swap
space, etc. This will help improve the performance of the THP swap.
- The THP swap space read/write will be 2M sequential IO. It is
particularly helpful for the swap read, which are usually 4k random
IO. This will improve the performance of the THP swap too.
- It will help the memory fragmentation, especially when the THP is
heavily used by the applications. The 2M continuous pages will be
freed up after THP swapping out.
- It will improve the THP utilization on the system with the swap
turned on. Because the speed for khugepaged to collapse the normal
pages into the THP is quite slow. After the THP is split during the
swapping out, it will take quite a long time for the normal pages to
collapse back into the THP after being swapped in. The high THP
utilization helps the efficiency of the page based memory management
too.
There are some concerns regarding THP swap in, mainly because possible
enlarged read/write IO size (for swap in/out) may put more overhead on
the storage device. To deal with that, the THP swap in should be turned
on only when necessary. For example, it can be selected via
"always/never/madvise" logic, to be turned on globally, turned off
globally, or turned on only for VMA with MADV_HUGEPAGE, etc.
This patchset is the first step for the THP swap support. The plan is
to delay splitting THP step by step, finally avoid splitting THP during
the THP swapping out and swap out/in the THP as a whole.
As the first step, in this patchset, the splitting huge page is delayed
from almost the first step of swapping out to after allocating the swap
space for the THP and adding the THP into the swap cache. This will
reduce lock acquiring/releasing for the locks used for the swap cache
management.
With the patchset, the swap out throughput improves 15.5% (from about
3.73GB/s to about 4.31GB/s) in the vm-scalability swap-w-seq test case
with 8 processes. The test is done on a Xeon E5 v3 system. The swap
device used is a RAM simulated PMEM (persistent memory) device. To test
the sequential swapping out, the test case creates 8 processes, which
sequentially allocate and write to the anonymous pages until the RAM and
part of the swap device is used up.
This patch (of 5):
In this patch, splitting huge page is delayed from almost the first step
of swapping out to after allocating the swap space for the THP
(Transparent Huge Page) and adding the THP into the swap cache. This
will batch the corresponding operation, thus improve THP swap out
throughput.
This is the first step for the THP swap optimization. The plan is to
delay splitting the THP step by step and avoid splitting the THP
finally.
In this patch, one swap cluster is used to hold the contents of each THP
swapped out. So, the size of the swap cluster is changed to that of the
THP (Transparent Huge Page) on x86_64 architecture (512). For other
architectures which want such THP swap optimization,
ARCH_USES_THP_SWAP_CLUSTER needs to be selected in the Kconfig file for
the architecture. In effect, this will enlarge swap cluster size by 2
times on x86_64. Which may make it harder to find a free cluster when
the swap space becomes fragmented. So that, this may reduce the
continuous swap space allocation and sequential write in theory. The
performance test in 0day shows no regressions caused by this.
In the future of THP swap optimization, some information of the swapped
out THP (such as compound map count) will be recorded in the
swap_cluster_info data structure.
The mem cgroup swap accounting functions are enhanced to support charge
or uncharge a swap cluster backing a THP as a whole.
The swap cluster allocate/free functions are added to allocate/free a
swap cluster for a THP. A fairly simple algorithm is used for swap
cluster allocation, that is, only the first swap device in priority list
will be tried to allocate the swap cluster. The function will fail if
the attempt is not successful, and the caller will fall back to allocating a
single swap slot instead. This works well enough for normal cases. If
the difference of the number of the free swap clusters among multiple
swap devices is significant, it is possible that some THPs are split
earlier than necessary. For example, this could be caused by big size
difference among multiple swap devices.
The swap cache functions are enhanced to support adding/deleting a THP to/from
the swap cache as a set of (HPAGE_PMD_NR) sub-pages. This may be
enhanced in the future with multi-order radix tree. But because we will
split the THP soon during swapping out, that optimization doesn't make
much sense for this first step.
The THP splitting functions are enhanced to support splitting THP in the swap
cache during swapping out. The page lock will be held during allocating
the swap cluster, adding the THP into the swap cache and splitting the
THP. So in code paths other than swapping out, if the THP needs to be
split, PageSwapCache(THP) will always be false.
The swap cluster is only available for SSD, so the THP swap optimization
in this patchset has no effect for HDD.
[ying.huang@intel.com: fix two issues in THP optimize patch]
Link: http://lkml.kernel.org/r/87k25ed8zo.fsf@yhuang-dev.intel.com
[hannes@cmpxchg.org: extensive cleanups and simplifications, reduce code size]
Link: http://lkml.kernel.org/r/20170515112522.32457-2-ying.huang@intel.com
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Suggested-by: Andrew Morton <akpm@linux-foundation.org> [for config option]
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> [for changes in huge_memory.c and huge_mm.h]
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Ebru Akagunduz <ebru.akagunduz@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Shaohua Li <shli@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
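A rough user-space sketch of the fallback policy described above. The helper names are invented for the illustration and are not the kernel's swapfile.c functions: try to get a whole cluster for the THP, and if none is free fall back to a single slot, which is the case where the THP gets split.
#include <stdbool.h>
#include <stdio.h>

#define HPAGE_NR_SLOTS 512	/* one 2M THP covers 512 4K swap slots on x86_64 */

/* pretend the highest-priority swap device has no free cluster */
static bool alloc_swap_cluster(int nr_slots)
{
	(void)nr_slots;
	return false;
}

static bool alloc_single_slot(void)
{
	return true;		/* single slots are usually still available */
}

int main(void)
{
	if (alloc_swap_cluster(HPAGE_NR_SLOTS))
		printf("THP swapped out as one unit\n");
	else if (alloc_single_slot())
		printf("no free cluster: split the THP and swap 4K pages\n");
	return 0;
}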
2017-07-06 22:37:18 +00:00
|
|
|
config THP_SWAP
|
|
|
|
def_bool y
|
2018-08-17 22:49:41 +00:00
|
|
|
depends on TRANSPARENT_HUGEPAGE && ARCH_WANTS_THP_SWAP && SWAP
|
2017-07-06 22:37:18 +00:00
|
|
|
help
|
|
|
|
Swap transparent huge pages in one piece, without splitting.
|
2018-08-17 22:49:41 +00:00
|
|
|
XXX: For now, swap cluster backing transparent huge page
|
|
|
|
will be split after swapout.
|
2017-07-06 22:37:18 +00:00
|
|
|
|
|
|
|
For selection by architectures with reasonable THP sizes.
|
|
|
|
|
2022-05-19 21:08:53 +00:00
|
|
|
config READ_ONLY_THP_FOR_FS
|
|
|
|
bool "Read-only THP for filesystems (EXPERIMENTAL)"
|
|
|
|
depends on TRANSPARENT_HUGEPAGE && SHMEM
|
|
|
|
|
|
|
|
help
|
|
|
|
Allow khugepaged to put read-only file-backed pages in THP.
|
|
|
|
|
|
|
|
This is marked experimental because it is a new feature. Write
|
|
|
|
support of file THPs will be developed in the next few release
|
|
|
|
cycles.
|
|
|
|
|
|
|
|
endif # TRANSPARENT_HUGEPAGE
|
|
|
|
|
2010-09-03 16:22:48 +00:00
|
|
|
#
|
|
|
|
# UP and nommu archs use km based percpu allocator
|
|
|
|
#
|
|
|
|
config NEED_PER_CPU_KM
|
2021-11-30 17:29:54 +00:00
|
|
|
depends on !SMP || !MMU
|
2010-09-03 16:22:48 +00:00
|
|
|
bool
|
|
|
|
default y
|
2011-05-26 16:01:36 +00:00
|
|
|
|
mm: percpu: generalize percpu related config
Patch series "mm: percpu: Cleanup percpu first chunk function".
When supporting page mapping percpu first chunk allocator on arm64, we
found there are lots of duplicated codes in percpu embed/page first chunk
allocator. This patchset aims to clean them up and should introduce no
functional change.
The currently supported status about 'embed' and 'page' in Archs shows
below,
embed: NEED_PER_CPU_EMBED_FIRST_CHUNK
page: NEED_PER_CPU_PAGE_FIRST_CHUNK
embed page
------------------------
arm64 Y Y
mips Y N
powerpc Y Y
riscv Y N
sparc Y Y
x86 Y Y
------------------------
There are two interfaces about percpu first chunk allocator,
extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
size_t atom_size,
pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
- pcpu_fc_alloc_fn_t alloc_fn,
- pcpu_fc_free_fn_t free_fn);
+ pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn);
extern int __init pcpu_page_first_chunk(size_t reserved_size,
- pcpu_fc_alloc_fn_t alloc_fn,
- pcpu_fc_free_fn_t free_fn,
- pcpu_fc_populate_pte_fn_t populate_pte_fn);
+ pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn);
The pcpu_fc_alloc_fn_t/pcpu_fc_free_fn_t is killed, we provide generic
pcpu_fc_alloc() and pcpu_fc_free() function, which are called in the
pcpu_embed/page_first_chunk().
1) For pcpu_embed_first_chunk(), pcpu_fc_cpu_to_node_fn_t is needed to be
provided when archs supported NUMA.
2) For pcpu_page_first_chunk(), the pcpu_fc_populate_pte_fn_t is killed too,
a generic pcpu_populate_pte() which marked '__weak' is provided, if you
need a different function to populate pte on the arch(like x86), please
provide its own implementation.
[1] https://github.com/kevin78/linux.git percpu-cleanup
This patch (of 4):
The HAVE_SETUP_PER_CPU_AREA/NEED_PER_CPU_EMBED_FIRST_CHUNK/
NEED_PER_CPU_PAGE_FIRST_CHUNK/USE_PERCPU_NUMA_NODE_ID configs have
duplicate definitions on the platforms that subscribe to them.
Move them into mm, drop these redundant definitions and instead just
select it on applicable platforms.
Link: https://lkml.kernel.org/r/20211216112359.103822-1-wangkefeng.wang@huawei.com
Link: https://lkml.kernel.org/r/20211216112359.103822-2-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com> [arm64]
Cc: Will Deacon <will@kernel.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "Rafael J. Wysocki" <rafael@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2022-01-20 02:07:41 +00:00
|
|
|
config NEED_PER_CPU_EMBED_FIRST_CHUNK
|
|
|
|
bool
|
|
|
|
|
|
|
|
config NEED_PER_CPU_PAGE_FIRST_CHUNK
|
|
|
|
bool
|
|
|
|
|
|
|
|
config USE_PERCPU_NUMA_NODE_ID
|
|
|
|
bool
|
|
|
|
|
|
|
|
config HAVE_SETUP_PER_CPU_AREA
|
|
|
|
bool
|
|
|
|
|
2012-04-09 23:10:34 +00:00
|
|
|
config FRONTSWAP
|
2022-01-22 06:15:14 +00:00
|
|
|
bool
|
2013-07-02 05:45:15 +00:00
|
|
|
|
|
|
|
config CMA
|
|
|
|
bool "Contiguous Memory Allocator"
|
2018-10-30 22:07:44 +00:00
|
|
|
depends on MMU
|
2013-07-02 05:45:15 +00:00
|
|
|
select MIGRATION
|
|
|
|
select MEMORY_ISOLATION
|
|
|
|
help
|
|
|
|
This enables the Contiguous Memory Allocator which allows other
|
|
|
|
subsystems to allocate big physically-contiguous blocks of memory.
|
|
|
|
CMA reserves a region of memory and allows only movable pages to
|
|
|
|
be allocated from it. This way, the kernel can use the memory for
|
|
|
|
pagecache and when a subsystem requests a contiguous area, the
|
|
|
|
allocated pages are migrated away to serve the contiguous request.
|
|
|
|
|
|
|
|
If unsure, say "n".
|
|
|
|
|
|
|
|
config CMA_DEBUG
|
|
|
|
bool "CMA debug messages (DEVELOPMENT)"
|
|
|
|
depends on DEBUG_KERNEL && CMA
|
|
|
|
help
|
|
|
|
Turns on debug messages in CMA. This produces KERN_DEBUG
|
|
|
|
messages for every CMA call as well as various messages while
|
|
|
|
processing calls such as dma_alloc_from_contiguous().
|
|
|
|
This option does not affect warning and error messages.
|
2013-08-28 22:41:59 +00:00
|
|
|
|
2015-04-14 22:44:57 +00:00
|
|
|
config CMA_DEBUGFS
|
|
|
|
bool "CMA debugfs interface"
|
|
|
|
depends on CMA && DEBUG_FS
|
|
|
|
help
|
|
|
|
Turns on the DebugFS interface for CMA.
|
|
|
|
|
2021-05-05 01:37:28 +00:00
|
|
|
config CMA_SYSFS
|
|
|
|
bool "CMA information through sysfs interface"
|
|
|
|
depends on CMA && SYSFS
|
|
|
|
help
|
|
|
|
This option exposes some sysfs attributes to get information
|
|
|
|
from CMA.
|
|
|
|
|
2014-08-06 23:05:25 +00:00
|
|
|
config CMA_AREAS
|
|
|
|
int "Maximum count of the CMA areas"
|
|
|
|
depends on CMA
|
2020-08-23 23:03:07 +00:00
|
|
|
default 19 if NUMA
|
2014-08-06 23:05:25 +00:00
|
|
|
default 7
|
|
|
|
help
|
|
|
|
CMA allows creating CMA areas for a particular purpose, mainly
|
|
|
|
used as device-private areas. This parameter sets the maximum
|
|
|
|
number of CMA areas in the system.
|
|
|
|
|
2020-08-23 23:03:07 +00:00
|
|
|
If unsure, leave the default value "7" in UMA and "19" in NUMA.
|
2014-08-06 23:05:25 +00:00
|
|
|
|
2014-08-06 23:08:36 +00:00
|
|
|
config MEM_SOFT_DIRTY
|
|
|
|
bool "Track memory changes"
|
|
|
|
depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
|
|
|
|
select PROC_PAGE_MONITOR
|
2013-07-10 23:04:55 +00:00
|
|
|
help
|
2014-08-06 23:08:36 +00:00
|
|
|
This option enables memory changes tracking by introducing a
|
|
|
|
soft-dirty bit on PTEs. This bit is set when someone writes
|
|
|
|
into a page, just like the regular dirty bit, but unlike the latter
|
|
|
|
it can be cleared by hand.
|
|
|
|
|
2018-04-18 08:07:49 +00:00
|
|
|
See Documentation/admin-guide/mm/soft-dirty.rst for more details.
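A hedged demo of the interface that soft-dirty.rst documents (it needs CONFIG_MEM_SOFT_DIRTY and may need privileges to read pagemap): write "4" to /proc/self/clear_refs to clear the bits, dirty a page, then check bit 55 of its /proc/self/pagemap entry.
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	p[0] = 1;					/* make sure the page is populated */

	int clear = open("/proc/self/clear_refs", O_WRONLY);
	if (clear < 0 || write(clear, "4", 1) != 1)	/* "4" clears soft-dirty bits */
		return 1;
	close(clear);

	p[0] = 2;					/* dirty the page again */

	uint64_t entry = 0;
	int pm = open("/proc/self/pagemap", O_RDONLY);
	if (pm < 0 || pread(pm, &entry, sizeof(entry),
			    ((uintptr_t)p / page) * sizeof(entry)) != sizeof(entry))
		return 1;
	close(pm);

	printf("soft-dirty bit after write: %d\n", (int)((entry >> 55) & 1));
	return 0;
}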
|
2013-07-10 23:04:55 +00:00
|
|
|
|
2014-04-07 22:39:48 +00:00
|
|
|
config GENERIC_EARLY_IOREMAP
|
|
|
|
bool
|
2014-04-30 21:26:02 +00:00
|
|
|
|
2020-11-06 18:41:36 +00:00
|
|
|
config STACK_MAX_DEFAULT_SIZE_MB
|
|
|
|
int "Default maximum user stack size for 32-bit processes (MB)"
|
|
|
|
default 100
|
2014-04-30 21:26:02 +00:00
|
|
|
range 8 2048
|
|
|
|
depends on STACK_GROWSUP && (!64BIT || COMPAT)
|
|
|
|
help
|
|
|
|
This is the maximum stack size in Megabytes in the VM layout of 32-bit
|
|
|
|
user processes when the stack grows upwards (currently only on parisc
|
2020-11-06 18:41:36 +00:00
|
|
|
arch) when the RLIMIT_STACK hard limit is unlimited.
|
2014-04-30 21:26:02 +00:00
|
|
|
|
2020-11-06 18:41:36 +00:00
|
|
|
A sane initial value is 100 MB.
|
2015-06-30 21:57:02 +00:00
|
|
|
|
|
|
|
config DEFERRED_STRUCT_PAGE_INIT
|
2016-02-05 23:36:21 +00:00
|
|
|
bool "Defer initialisation of struct pages to kthreads"
|
mm: make DEFERRED_STRUCT_PAGE_INIT explicitly depend on SPARSEMEM
The deferred memory initialization relies on section definitions, e.g
PAGES_PER_SECTION, that are only available when CONFIG_SPARSEMEM=y on
most architectures.
Initially DEFERRED_STRUCT_PAGE_INIT depended on explicit
ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT configuration option, but since
the commit 2e3ca40f03bb13709df4 ("mm: relax deferred struct page
requirements") this requirement was relaxed and now it is possible to
enable DEFERRED_STRUCT_PAGE_INIT on architectures that support
DISCONTIGMEM and NO_BOOTMEM, which causes build failures.
For instance, setting SMP=y and DEFERRED_STRUCT_PAGE_INIT=y on arc
causes the following build failure:
CC mm/page_alloc.o
mm/page_alloc.c: In function 'update_defer_init':
mm/page_alloc.c:321:14: error: 'PAGES_PER_SECTION'
undeclared (first use in this function); did you mean 'USEC_PER_SEC'?
(pfn & (PAGES_PER_SECTION - 1)) == 0) {
^~~~~~~~~~~~~~~~~
USEC_PER_SEC
mm/page_alloc.c:321:14: note: each undeclared identifier is reported only once for each function it appears in
In file included from include/linux/cache.h:5:0,
from include/linux/printk.h:9,
from include/linux/kernel.h:14,
from include/asm-generic/bug.h:18,
from arch/arc/include/asm/bug.h:32,
from include/linux/bug.h:5,
from include/linux/mmdebug.h:5,
from include/linux/mm.h:9,
from mm/page_alloc.c:18:
mm/page_alloc.c: In function 'deferred_grow_zone':
mm/page_alloc.c:1624:52: error: 'PAGES_PER_SECTION' undeclared (first use in this function); did you mean 'USEC_PER_SEC'?
unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
^
include/uapi/linux/kernel.h:11:47: note: in definition of macro '__ALIGN_KERNEL_MASK'
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
^~~~
include/linux/kernel.h:58:22: note: in expansion of macro '__ALIGN_KERNEL'
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
^~~~~~~~~~~~~~
mm/page_alloc.c:1624:34: note: in expansion of macro 'ALIGN'
unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
^~~~~
In file included from include/asm-generic/bug.h:18:0,
from arch/arc/include/asm/bug.h:32,
from include/linux/bug.h:5,
from include/linux/mmdebug.h:5,
from include/linux/mm.h:9,
from mm/page_alloc.c:18:
mm/page_alloc.c: In function 'free_area_init_node':
mm/page_alloc.c:6379:50: error: 'PAGES_PER_SECTION' undeclared (first use in this function); did you mean 'USEC_PER_SEC'?
pgdat->static_init_pgcnt = min_t(unsigned long, PAGES_PER_SECTION,
^
include/linux/kernel.h:812:22: note: in definition of macro '__typecheck'
(!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
^
include/linux/kernel.h:836:24: note: in expansion of macro '__safe_cmp'
__builtin_choose_expr(__safe_cmp(x, y), \
^~~~~~~~~~
include/linux/kernel.h:904:27: note: in expansion of macro '__careful_cmp'
#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <)
^~~~~~~~~~~~~
mm/page_alloc.c:6379:29: note: in expansion of macro 'min_t'
pgdat->static_init_pgcnt = min_t(unsigned long, PAGES_PER_SECTION,
^~~~~
include/linux/kernel.h:836:2: error: first argument to '__builtin_choose_expr' not a constant
__builtin_choose_expr(__safe_cmp(x, y), \
^
include/linux/kernel.h:904:27: note: in expansion of macro '__careful_cmp'
#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <)
^~~~~~~~~~~~~
mm/page_alloc.c:6379:29: note: in expansion of macro 'min_t'
pgdat->static_init_pgcnt = min_t(unsigned long, PAGES_PER_SECTION,
^~~~~
scripts/Makefile.build:317: recipe for target 'mm/page_alloc.o' failed
Let's make the DEFERRED_STRUCT_PAGE_INIT explicitly depend on SPARSEMEM
as the systems that support DISCONTIGMEM do not seem to have that huge
amounts of memory that would make DEFERRED_STRUCT_PAGE_INIT relevant.
Link: http://lkml.kernel.org/r/1530279308-24988-1-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Tested-by: Randy Dunlap <rdunlap@infradead.org>
Cc: Pasha Tatashin <Pavel.Tatashin@microsoft.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2018-08-17 22:47:07 +00:00
|
|
|
depends on SPARSEMEM
|
2018-05-18 23:09:13 +00:00
|
|
|
depends on !NEED_PER_CPU_KM
|
2018-09-20 19:22:30 +00:00
|
|
|
depends on 64BIT
|
2020-06-03 22:59:51 +00:00
|
|
|
select PADATA
|
2015-06-30 21:57:02 +00:00
|
|
|
help
|
|
|
|
Ordinarily all struct pages are initialised during early boot in a
|
|
|
|
single thread. On very large machines this can take a considerable
|
|
|
|
amount of time. If this option is set, large machines will bring up
|
2020-06-03 22:59:51 +00:00
|
|
|
a subset of memmap at boot and then initialise the rest in parallel.
|
|
|
|
This has a potential performance impact on tasks running early in the
|
2016-02-05 23:36:21 +00:00
|
|
|
lifetime of the system until these kthreads finish the
|
|
|
|
initialisation.
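When enabled, each node reports its deferred initialisation time in the
boot log, which makes the effect easy to measure; for example (figures
from a 96-CPU, 192G test system):

    [    0.231435] node 1 initialised, 24189223 pages in 32ms
    [    0.236718] node 0 initialised, 23398907 pages in 36ms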
|
2015-08-09 19:29:06 +00:00
|
|
|
|
2021-09-08 02:56:40 +00:00
|
|
|
config PAGE_IDLE_FLAG
|
|
|
|
bool
|
|
|
|
select PAGE_EXTENSION if !64BIT
|
|
|
|
help
|
|
|
|
This adds PG_idle and PG_young flags to 'struct page'. PTE Accessed
|
|
|
|
bit writers can set the state of the bit in the flags so that PTE
|
|
|
|
Accessed bit readers may avoid disturbance.
|
|
|
|
|
2015-09-09 22:35:45 +00:00
|
|
|
config IDLE_PAGE_TRACKING
|
|
|
|
bool "Enable idle page tracking"
|
|
|
|
depends on SYSFS && MMU
|
2021-09-08 02:56:40 +00:00
|
|
|
select PAGE_IDLE_FLAG
|
2015-09-09 22:35:45 +00:00
|
|
|
help
|
|
|
|
This feature allows estimating the number of user pages that have
|
|
|
|
not been touched during a given period of time. This information can
|
|
|
|
be useful to tune memory cgroup limits and/or for job placement
|
|
|
|
within a compute cluster.
|
|
|
|
|
2018-04-18 08:07:49 +00:00
|
|
|
See Documentation/admin-guide/mm/idle_page_tracking.rst for
|
|
|
|
more details.
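As a rough sketch only (the offset arithmetic follows the documentation
referenced above; the PFN value is an arbitrary placeholder that would
normally be derived from /proc/PID/pagemap), the bitmap interface can be
driven from a small C program:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Bitmap of per-page-frame idle bits, accessed in 64-bit chunks. */
            int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
            uint64_t bits = ~0ULL;            /* mark 64 consecutive frames idle */
            unsigned long pfn = 0x100000;     /* placeholder PFN */
            off_t off = pfn / 64 * 8;         /* byte offset of that 64-bit chunk */

            if (fd < 0)
                    return 1;
            pwrite(fd, &bits, sizeof(bits), off);  /* set the Idle flags */
            sleep(60);                             /* let the workload run */
            pread(fd, &bits, sizeof(bits), off);   /* bits still set: pages untouched */
            printf("idle mask: %llx\n", (unsigned long long)bits);
            close(fd);
            return 0;
    }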
|
2015-09-09 22:35:45 +00:00
|
|
|
|
2021-05-05 01:38:09 +00:00
|
|
|
config ARCH_HAS_CACHE_LINE_SIZE
|
|
|
|
bool
|
|
|
|
|
2022-02-16 20:05:28 +00:00
|
|
|
config ARCH_HAS_CURRENT_STACK_POINTER
|
|
|
|
bool
|
|
|
|
help
|
|
|
|
In support of HARDENED_USERCOPY performing stack variable lifetime
|
|
|
|
checking, an architecture-agnostic way to find the stack pointer
|
|
|
|
is needed. Once an architecture defines an unsigned long global
|
|
|
|
register alias named "current_stack_pointer", this config can be
|
|
|
|
selected.
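For illustration, such an alias is typically declared in an architecture
header along these lines (the register name "sp" is only an example; each
architecture names its own stack-pointer register):

    register unsigned long current_stack_pointer asm("sp");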
|
|
|
|
|
2019-07-16 23:30:47 +00:00
|
|
|
config ARCH_HAS_PTE_DEVMAP
|
2017-06-28 01:32:31 +00:00
|
|
|
bool
|
|
|
|
|
2021-07-01 01:52:20 +00:00
|
|
|
config ARCH_HAS_ZONE_DMA_SET
|
|
|
|
bool
|
|
|
|
|
|
|
|
config ZONE_DMA
|
|
|
|
bool "Support DMA zone" if ARCH_HAS_ZONE_DMA_SET
|
|
|
|
default y if ARM64 || X86
|
|
|
|
|
|
|
|
config ZONE_DMA32
|
|
|
|
bool "Support DMA32 zone" if ARCH_HAS_ZONE_DMA_SET
|
|
|
|
depends on !X86_32
|
|
|
|
default y if ARM64
|
|
|
|
|
2015-08-09 19:29:06 +00:00
|
|
|
config ZONE_DEVICE
|
2017-09-08 23:11:43 +00:00
|
|
|
bool "Device memory (pmem, HMM, etc...) hotplug support"
|
2015-08-09 19:29:06 +00:00
|
|
|
depends on MEMORY_HOTPLUG
|
|
|
|
depends on MEMORY_HOTREMOVE
|
2016-03-17 21:19:58 +00:00
|
|
|
depends on SPARSEMEM_VMEMMAP
|
2019-07-16 23:30:47 +00:00
|
|
|
depends on ARCH_HAS_PTE_DEVMAP
|
2018-09-22 20:14:30 +00:00
|
|
|
select XARRAY_MULTI
|
2015-08-09 19:29:06 +00:00
|
|
|
|
|
|
|
help
|
|
|
|
Device memory hotplug support allows for establishing pmem,
|
|
|
|
or other device driver discovered memory regions, in the
|
|
|
|
memmap. This allows pfn_to_page() lookups of otherwise
|
|
|
|
"device-physical" addresses which is needed for using a DAX
|
|
|
|
mapping in an O_DIRECT operation, among other things.
|
|
|
|
|
|
|
|
If FS_DAX is enabled, then say Y.
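As a sketch of the O_DIRECT case mentioned above (paths and sizes are
placeholders, error handling trimmed): a buffer mmap()ed from a file on a
DAX filesystem is used for an O_DIRECT read, which only works when struct
pages exist for the device memory so the pages can be pinned for I/O:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            size_t len = 1UL << 20;
            int dax_fd = open("/mnt/pmem/buffer", O_RDWR);       /* file on a DAX fs (placeholder) */
            int blk_fd = open("/dev/sdb", O_RDONLY | O_DIRECT);  /* some block device (placeholder) */
            void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_SHARED, dax_fd, 0);             /* device-backed pages */

            if (dax_fd < 0 || blk_fd < 0 || buf == MAP_FAILED)
                    return 1;
            pread(blk_fd, buf, len, 0);  /* direct I/O lands in the DAX-backed buffer */
            munmap(buf, len);
            return 0;
    }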
|
2015-09-11 23:42:39 +00:00
|
|
|
|
2019-08-06 16:05:52 +00:00
|
|
|
#
|
|
|
|
# Helpers to mirror a range of the CPU page tables of a process into device page
|
|
|
|
# tables.
|
|
|
|
#
|
2017-09-08 23:11:27 +00:00
|
|
|
config HMM_MIRROR
|
2019-08-06 16:05:52 +00:00
|
|
|
bool
|
2019-08-06 16:05:51 +00:00
|
|
|
depends on MMU
|
2017-09-08 23:11:27 +00:00
|
|
|
|
2022-05-20 20:41:24 +00:00
|
|
|
config GET_FREE_REGION
|
|
|
|
depends on SPARSEMEM
|
|
|
|
bool
|
|
|
|
|
2017-09-08 23:11:43 +00:00
|
|
|
config DEVICE_PRIVATE
|
|
|
|
bool "Unaddressable device memory (GPU memory, ...)"
|
2019-06-26 12:27:22 +00:00
|
|
|
depends on ZONE_DEVICE
|
2022-05-20 20:41:24 +00:00
|
|
|
select GET_FREE_REGION
|
2017-09-08 23:11:43 +00:00
|
|
|
|
|
|
|
help
|
|
|
|
Allows creation of struct pages to represent unaddressable device
|
|
|
|
memory; i.e., memory that is only accessible from the device (or
|
|
|
|
group of devices). You likely also want to select HMM_MIRROR.
|
|
|
|
|
2020-10-17 23:15:10 +00:00
|
|
|
config VMAP_PFN
|
|
|
|
bool
|
|
|
|
|
2016-02-12 21:02:08 +00:00
|
|
|
config ARCH_USES_HIGH_VMA_FLAGS
|
|
|
|
bool
|
2016-02-12 21:02:32 +00:00
|
|
|
config ARCH_HAS_PKEYS
|
|
|
|
bool
|
2017-06-19 23:28:31 +00:00
|
|
|
|
2022-05-25 11:25:59 +00:00
|
|
|
config VM_EVENT_COUNTERS
|
|
|
|
default y
|
|
|
|
bool "Enable VM event counters for /proc/vmstat" if EXPERT
|
|
|
|
help
|
|
|
|
VM event counters are needed for event counts to be shown.
|
|
|
|
This option allows disabling the VM event counters
|
|
|
|
on EXPERT systems. /proc/vmstat will only show page counts
|
|
|
|
if VM event counters are disabled.
|
|
|
|
|
2017-06-19 23:28:31 +00:00
|
|
|
config PERCPU_STATS
|
|
|
|
bool "Collect percpu memory statistics"
|
|
|
|
help
|
|
|
|
This feature collects and exposes statistics via debugfs. The
|
|
|
|
information includes global and per chunk statistics, which can
|
|
|
|
be used to help understand percpu memory usage.
|
2017-11-17 23:31:22 +00:00
|
|
|
|
2020-12-15 03:05:05 +00:00
|
|
|
config GUP_TEST
|
|
|
|
bool "Enable infrastructure for get_user_pages()-related unit tests"
|
2020-12-15 03:05:38 +00:00
|
|
|
depends on DEBUG_FS
|
2017-11-17 23:31:22 +00:00
|
|
|
help
|
2020-12-15 03:05:05 +00:00
|
|
|
Provides /sys/kernel/debug/gup_test, which in turn provides a way
|
|
|
|
to make ioctl calls that can launch kernel-based unit tests for
|
|
|
|
the get_user_pages*() and pin_user_pages*() family of API calls.
|
2017-11-17 23:31:22 +00:00
|
|
|
|
2020-12-15 03:05:05 +00:00
|
|
|
These tests include benchmark testing of the _fast variants of
|
|
|
|
get_user_pages*() and pin_user_pages*(), as well as smoke tests of
|
|
|
|
the non-_fast variants.
|
|
|
|
|
2020-12-15 03:05:21 +00:00
|
|
|
There is also a sub-test that allows running dump_page() on any
|
|
|
|
of up to eight pages (selected by command line args) within the
|
|
|
|
range of user-space addresses. These pages are either pinned via
|
|
|
|
pin_user_pages*(), or pinned via get_user_pages*(), as specified
|
|
|
|
by other command line arguments.
|
|
|
|
|
2020-12-15 03:05:05 +00:00
|
|
|
See tools/testing/selftests/vm/gup_test.c
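For example, an invocation of the selftest such as

    ./gup_test -ct -F 1 0 19 0x1000

runs the dump_pages sub-test (-c) on THP-backed memory (-t), pins with
pin_user_pages() rather than get_user_pages() (-F 1), and dumps pages 0,
19 and 4096 of the test range.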
|
2018-06-08 00:06:08 +00:00
|
|
|
|
2020-12-15 03:05:38 +00:00
|
|
|
comment "GUP_TEST needs to have DEBUG_FS enabled"
|
|
|
|
depends on !GUP_TEST && !DEBUG_FS
|
2018-06-08 00:06:08 +00:00
|
|
|
|
2019-07-12 03:56:49 +00:00
|
|
|
config GUP_GET_PTE_LOW_HIGH
|
|
|
|
bool
|
|
|
|
|
2018-06-08 00:06:08 +00:00
|
|
|
config ARCH_HAS_PTE_SPECIAL
|
|
|
|
bool
|
2018-07-31 11:39:35 +00:00
|
|
|
|
2019-07-12 03:57:28 +00:00
|
|
|
#
|
|
|
|
# Some architectures need a special hugepage directory format that is
|
|
|
|
# required to support multiple hugepage sizes. For example a4fe3ce76
|
|
|
|
# "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
|
|
|
|
# introduced it on powerpc. This allows for more flexible hugepage
|
|
|
|
# pagetable layouts.
|
|
|
|
#
|
|
|
|
config ARCH_HAS_HUGEPD
|
|
|
|
bool
|
|
|
|
|
2019-03-19 12:12:30 +00:00
|
|
|
config MAPPING_DIRTY_HELPERS
|
|
|
|
bool
|
|
|
|
|
2020-11-03 09:27:18 +00:00
|
|
|
config KMAP_LOCAL
|
|
|
|
bool
|
|
|
|
|
2021-11-20 00:43:55 +00:00
|
|
|
config KMAP_LOCAL_NON_LINEAR_PTE_ARRAY
|
|
|
|
bool
|
|
|
|
|
2021-04-30 05:57:32 +00:00
|
|
|
# struct io_mapping based helpers. Selected by drivers that need them
|
|
|
|
config IO_MAPPING
|
|
|
|
bool
|
2021-07-08 01:08:03 +00:00
|
|
|
|
|
|
|
config SECRETMEM
|
|
|
|
def_bool ARCH_HAS_SET_DIRECT_MAP && !EMBEDDED
|
|
|
|
|
mm: add a field to store names for private anonymous memory
In many userspace applications, and especially in VM-based applications
such as those Android uses heavily, there are multiple different
allocators in use. At a minimum there are libc malloc and the stack, and
in many cases there are libc malloc, the stack, direct syscalls to mmap
anonymous memory, and multiple VM heaps (one for small objects, one for
big objects, etc.). Each of these layers usually has its own tools to
inspect its usage: malloc by compiling a debug version, the VM through
heap inspection tools; for direct syscalls there is usually no way to
track them.
On Android we heavily use a set of tools that use an extended version of
the logic covered in Documentation/vm/pagemap.txt to walk all pages
mapped in userspace and slice their usage by process, shared (COW) vs.
unique mappings, backing, etc. This can account for real physical
memory usage even in cases like fork without exec (which Android uses
heavily to share as many private COW pages as possible between
processes), Kernel SamePage Merging, and clean zero pages. It produces
a measurement of the pages that only exist in that process (USS, for
unique), and a measurement of the physical memory usage of that process
with the cost of shared pages being evenly split between processes that
share them (PSS).
If all anonymous memory is indistinguishable then figuring out the real
physical memory usage (PSS) of each heap requires either a pagemap
walking tool that can understand the heap debugging of every layer, or
for every layer's heap debugging tools to implement the pagemap walking
logic, in which case it is hard to get a consistent view of memory
across the whole system.
Tracking the information in userspace leads to all sorts of problems.
It either needs to be stored inside the process, which means every
process has to have an API to export its current heap information upon
request, or it has to be stored externally in a filesystem that somebody
needs to clean up on crashes. It needs to be readable while the process
is still running, so it has to have some sort of synchronization with
every layer of userspace. Efficiently tracking the ranges requires
reimplementing something like the kernel vma trees, and linking to it
from every layer of userspace. It requires more memory, more syscalls,
more runtime cost, and more complexity to separately track regions that
the kernel is already tracking.
This patch adds a field to /proc/pid/maps and /proc/pid/smaps to show a
userspace-provided name for anonymous vmas. The names of named
anonymous vmas are shown in /proc/pid/maps and /proc/pid/smaps as
[anon:<name>].
Userspace can set the name for a region of memory by calling
prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, start, len, (unsigned long)name)
Setting the name to NULL clears it. The name length limit is 80 bytes
including the NUL terminator, and the name is checked to contain only
printable ASCII characters (including space), except '[', ']', '\', '$'
and '`'.
ASCII strings are used to provide descriptive identifiers for vmas that
can be understood by users reading /proc/pid/maps or /proc/pid/smaps.
Names can be standardized for a given system and they can include some
variable parts such as the name of the allocator or a library, the tid of
the thread using it, etc.
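As an illustration of the call described above, a minimal userspace sketch
(the mapping size and the name "myheap" are arbitrary; the PR_SET_VMA
constants are spelled out in case older userspace headers lack them):
  #include <sys/mman.h>
  #include <sys/prctl.h>
  #ifndef PR_SET_VMA
  #define PR_SET_VMA              0x53564d41
  #define PR_SET_VMA_ANON_NAME    0
  #endif
  int main(void)
  {
          size_t len = 1 << 20;
          void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
          /* the region now shows up as [anon:myheap] in /proc/self/maps */
          prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
                (unsigned long)p, len, (unsigned long)"myheap");
          /* passing NULL (0) as the last argument clears the name again */
          prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long)p, len, 0);
          return 0;
  }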
The name is stored in a pointer in the shared union in vm_area_struct
that points to a null-terminated string. Anonymous vmas that have the
same name (equivalent strings) and are otherwise mergeable will be merged.
The name pointers are not shared between vmas even if they contain the
same name. The name pointer is stored in a union with fields that are
only used on file-backed mappings, so it does not increase memory usage.
The CONFIG_ANON_VMA_NAME kernel configuration option is introduced to enable this
feature. It keeps the feature disabled by default to prevent any
additional memory overhead and to avoid confusing procfs parsers on
systems which are not ready to support named anonymous vmas.
The patch is based on the original patch developed by Colin Cross, more
specifically on its latest version [1] posted upstream by Sumit Semwal.
It used a userspace pointer to store vma names. In that design, name
pointers could be shared between vmas. However during the last
upstreaming attempt, Kees Cook raised concerns [2] about this approach
and suggested copying the name into kernel memory space, performing
validity checks [3] and storing it as a string referenced from
vm_area_struct.
One big concern is fork() performance, which would need to strdup
anonymous vma names. Dave Hansen suggested experimenting with a
worst-case scenario of forking a process with 64k vmas having the longest
possible names [4]. I ran this experiment on an ARM64 Android device
and recorded a worst-case regression of almost 40% when forking such a
process.
This regression is addressed in the followup patch which replaces the
pointer to a name with a refcounted structure that allows sharing the
name pointer between vmas of the same name. Instead of duplicating the
string during fork() or when splitting a vma it increments the refcount.
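The refcounted structure mentioned above can be pictured roughly like this
(a sketch based on the description here, not necessarily the exact
upstream definition):
  struct anon_vma_name {
          struct kref kref;
          /* The string is dynamically sized, so it must come last. */
          char name[];
  };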
[1] https://lore.kernel.org/linux-mm/20200901161459.11772-4-sumit.semwal@linaro.org/
[2] https://lore.kernel.org/linux-mm/202009031031.D32EF57ED@keescook/
[3] https://lore.kernel.org/linux-mm/202009031022.3834F692@keescook/
[4] https://lore.kernel.org/linux-mm/5d0358ab-8c47-2f5f-8e43-23b89d6a8e95@intel.com/
Changes for prctl(2) manual page (in the options section):
PR_SET_VMA
Sets an attribute specified in arg2 for virtual memory areas
starting from the address specified in arg3 and spanning the
size specified in arg4. arg5 specifies the value of the attribute
to be set. Note that assigning an attribute to a virtual memory
area might prevent it from being merged with adjacent virtual
memory areas due to the difference in that attribute's value.
Currently, arg2 must be one of:
PR_SET_VMA_ANON_NAME
Set a name for anonymous virtual memory areas. arg5 should
be a pointer to a null-terminated string containing the
name. The name length including null byte cannot exceed
80 bytes. If arg5 is NULL, the name of the appropriate
anonymous virtual memory areas will be reset. The name
can contain only printable ascii characters (including
space), except '[',']','\','$' and '`'.
This feature is available only if the kernel is built with
the CONFIG_ANON_VMA_NAME option enabled.
[surenb@google.com: docs: proc.rst: /proc/PID/maps: fix malformed table]
Link: https://lkml.kernel.org/r/20211123185928.2513763-1-surenb@google.com
[surenb: rebased over v5.15-rc6, replaced userpointer with a kernel copy,
added input sanitization and CONFIG_ANON_VMA_NAME config. The bulk of the
work here was done by Colin Cross, therefore, with his permission, keeping
him as the author]
Link: https://lkml.kernel.org/r/20211019215511.3771969-2-surenb@google.com
Signed-off-by: Colin Cross <ccross@google.com>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jan Glauber <jan.glauber@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rob Landley <rob@landley.net>
Cc: "Serge E. Hallyn" <serge.hallyn@ubuntu.com>
Cc: Shaohua Li <shli@fusionio.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2022-01-14 22:05:59 +00:00
|
|
|
config ANON_VMA_NAME
|
|
|
|
bool "Anonymous VMA name support"
|
|
|
|
depends on PROC_FS && ADVISE_SYSCALLS && MMU
|
|
|
|
|
|
|
|
help
|
|
|
|
Allow naming anonymous virtual memory areas.
|
|
|
|
|
|
|
|
This feature allows assigning names to virtual memory areas. Assigned
|
|
|
|
names can be later retrieved from /proc/pid/maps and /proc/pid/smaps
|
|
|
|
and help identify individual anonymous memory areas.
|
|
|
|
Assigning a name to an anonymous virtual memory area might prevent that
|
|
|
|
area from being merged with adjacent virtual memory areas due to the
|
|
|
|
difference in their name.
|
|
|
|
|
2022-05-13 03:22:56 +00:00
|
|
|
config USERFAULTFD
|
|
|
|
bool "Enable userfaultfd() system call"
|
|
|
|
depends on MMU
|
|
|
|
help
|
|
|
|
Enable the userfaultfd() system call that allows applications to intercept and
|
|
|
|
handle page faults in userland.
|
|
|
|
|
|
|
|
config HAVE_ARCH_USERFAULTFD_WP
|
|
|
|
bool
|
|
|
|
help
|
|
|
|
Arch has userfaultfd write protection support
|
|
|
|
|
|
|
|
config HAVE_ARCH_USERFAULTFD_MINOR
|
|
|
|
bool
|
|
|
|
help
|
|
|
|
Arch has userfaultfd minor fault support
|
|
|
|
|
mm: introduce PTE_MARKER swap entry
Patch series "userfaultfd-wp: Support shmem and hugetlbfs", v8.
Overview
========
Userfaultfd-wp anonymous support was merged two years ago. There are quite
a few applications that started to leverage this capability, either to take
snapshots of user-app memory or to use it for fully user-controlled swapping.
This series tries to complete the feature for uffd-wp so as to cover all
the RAM-based memory types. So far uffd-wp is the only feature still
missing this coverage; the other features (uffd-missing & uffd-minor mode)
already have it.
One major reason to do so is that anonymous pages sometimes do not
satisfy the needs of applications, and there are growing numbers of users
of shmem and hugetlbfs, either for sharing purposes (e.g., sharing guest
memory between a hypervisor process and a device emulation process, or
shmem-based local live migration for upgrades) or for performance from TLB
hits.
All this means that if a uffd-wp app wants to switch to any of these memory
types, it will stop working. I think it's worthwhile to have the kernel
cover all these aspects.
This series chose to protect pages at the pte level, not the page level.
One major reason is safety. I have no idea how we could make it safe if
any uffd-privileged app could wr-protect a page that any other
application can use: it would mean such an app could block any process,
potentially for as long as it wants.
The other reason is that it aligns very well with not only the anonymous
uffd-wp solution, but also uffd as a whole. For example, userfaultfd is
implemented fundamentally based on VMAs: we set flags on VMAs showing the
status of uffd tracking. A per-page based protection solution would cross
that VMA-based foundation and could simply end up too far away from
what's called userfaultfd.
PTE markers
===========
The patchset is based on the idea called PTE markers. It was discussed in
one of the mm alignment sessions, proposed starting from v6, and this is
the 2nd version of it using the PTE marker idea.
A PTE marker is a new type of swap entry that is only applicable to
file-backed memory like shmem and hugetlbfs. It is used to persist some
pte-level information even if the original present ptes in the page table
are zapped.
Logically pte markers can store more than uffd-wp information, but so far
only one bit is used for uffd-wp purpose. When the pte marker is
installed with uffd-wp bit set, it means this pte is wr-protected by uffd.
It solves the problem where, e.g., ptes mapping file-backed memory get
zapped for any reason (e.g. thp split, or swap-out): we can still keep the
wr-protect information in the ptes. Then when the page fault triggers
again, we'll know this pte is wr-protected so we can treat the pte the
same as a normal uffd wr-protected pte.
The extra information is encoded into the swap entry, or swp_offset to be
explicit, with the swp_type being PTE_MARKER. So far uffd-wp only uses
one bit out of the swap entry; the remaining bits of swp_offset are still
reserved for other purposes.
There are two configs to enable/disable PTE markers:
CONFIG_PTE_MARKER
CONFIG_PTE_MARKER_UFFD_WP
We can set !PTE_MARKER to completely disable all the PTE markers, along
with uffd-wp support. I made two configs so we can also enable PTE markers
but disable uffd-wp for file-backed memory, for other purposes. At the end
of the current series, I'll enable CONFIG_PTE_MARKER by default, but that
patch is standalone and if anyone worries about having it by default, we
can also consider turning it off by dropping that one-liner patch. So far
I don't see a huge risk in doing so, so I kept that patch.
In most cases, PTE markers should be treated as none ptes. This is
because, unlike most of the other swap entry types, there is no PFN or
block offset information encoded into PTE markers, only some extra
well-defined bits showing the status of the pte. These bits should only be
used as extra data when servicing an upcoming page fault, and then we
behave as if it's a none pte.
I did spend a lot of time observing all the pte_none() users this time.
It is indeed a challenge because there are a lot, and I hope I didn't miss
a single one of them where we should take care of pte markers. Luckily, I
don't think it needs to be considered in many cases, for example: boot
code, arch code (especially non-x86), kernel-only page handling (e.g.
CPA), or device driver code dealing with pure PFN mappings.
I introduced pte_none_mostly() in this series for where we need to handle
pte markers the same as none ptes; the "mostly" is another way of writing
"either a none pte or a pte marker".
I didn't replace pte_none() to cover pte markers for the reasons below:
- Only very rare pte_none() callers will handle pte markers. E.g., all
the kernel pages do not require knowledge of pte markers. So we don't
pollute the major use cases.
- Unconditionally changing pte_none() semantics could confuse people,
because pte_none() has existed for such a long time.
- Unconditionally changing pte_none() semantics could make pte_none()
slower even though in many cases pte markers do not exist.
- There are cases where we'd like to handle pte markers differently from
pte_none(), so a full replacement is also impossible. E.g. khugepaged
should still treat pte markers as normal swap ptes rather than none ptes,
because pte markers will always need a fault-in to merge the marker with a
valid pte. Or the smaps code will need to parse PTE markers, not none ptes.
Patch Layout
============
Introducing PTE marker and uffd-wp bit in PTE marker:
mm: Introduce PTE_MARKER swap entry
mm: Teach core mm about pte markers
mm: Check against orig_pte for finish_fault()
mm/uffd: PTE_MARKER_UFFD_WP
Adding support for shmem uffd-wp:
mm/shmem: Take care of UFFDIO_COPY_MODE_WP
mm/shmem: Handle uffd-wp special pte in page fault handler
mm/shmem: Persist uffd-wp bit across zapping for file-backed
mm/shmem: Allow uffd wr-protect none pte for file-backed mem
mm/shmem: Allows file-back mem to be uffd wr-protected on thps
mm/shmem: Handle uffd-wp during fork()
Adding support for hugetlbfs uffd-wp:
mm/hugetlb: Introduce huge pte version of uffd-wp helpers
mm/hugetlb: Hook page faults for uffd write protection
mm/hugetlb: Take care of UFFDIO_COPY_MODE_WP
mm/hugetlb: Handle UFFDIO_WRITEPROTECT
mm/hugetlb: Handle pte markers in page faults
mm/hugetlb: Allow uffd wr-protect none ptes
mm/hugetlb: Only drop uffd-wp special pte if required
mm/hugetlb: Handle uffd-wp during fork()
Misc handling on the rest mm for uffd-wp file-backed:
mm/khugepaged: Don't recycle vma pgtable if uffd-wp registered
mm/pagemap: Recognize uffd-wp bit for shmem/hugetlbfs
Enabling of uffd-wp on file-backed memory:
mm/uffd: Enable write protection for shmem & hugetlbfs
mm: Enable PTE markers by default
selftests/uffd: Enable uffd-wp for shmem/hugetlbfs
Tests
=====
- Compile test on x86_64 and aarch64 on different configs
- Kernel selftests
- uffd-test [0]
- Umapsort [1,2] test for shmem/hugetlb, with swap on/off
[0] https://github.com/xzpeter/clibs/tree/master/uffd-test
[1] https://github.com/xzpeter/umap-apps/tree/peter
[2] https://github.com/xzpeter/umap/tree/peter-shmem-hugetlbfs
This patch (of 23):
Introduces a new swap entry type called PTE_MARKER. It can be installed
for any pte that maps file-backed memory when the pte is temporarily
zapped, so as to maintain per-pte information.
The information kept in the pte is called a "marker". Here we define
the marker as "unsigned long" just to match pgoff_t; however, it will only
work if it still fits in swp_offset(), which is e.g. currently 58 bits on
x86_64.
A new config, CONFIG_PTE_MARKER, is introduced too; it is off by default.
A bunch of helpers are defined together with it to service the rest of the
pte marker code.
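Going by the description above, those helpers would look roughly like the
sketch below; the names and the exact bit layout are illustrative, with
only bit 0 of swp_offset() used for uffd-wp so far:
  typedef unsigned long pte_marker;
  #define PTE_MARKER_UFFD_WP      BIT(0)
  #define PTE_MARKER_MASK         (PTE_MARKER_UFFD_WP)
  /* Build a PTE_MARKER-type swap entry carrying the marker bits. */
  static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
  {
          return swp_entry(SWP_PTE_MARKER, marker);
  }
  static inline bool is_pte_marker_entry(swp_entry_t entry)
  {
          return swp_type(entry) == SWP_PTE_MARKER;
  }
  /* Extract the marker bits back out of the swap entry. */
  static inline pte_marker pte_marker_get(swp_entry_t entry)
  {
          return swp_offset(entry) & PTE_MARKER_MASK;
  }
  static inline bool is_pte_marker(pte_t pte)
  {
          return is_swap_pte(pte) && is_pte_marker_entry(pte_to_swp_entry(pte));
  }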
[peterx@redhat.com: fixup]
Link: https://lkml.kernel.org/r/Yk2rdB7SXZf+2BDF@xz-m1.local
Link: https://lkml.kernel.org/r/20220405014646.13522-1-peterx@redhat.com
Link: https://lkml.kernel.org/r/20220405014646.13522-2-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Kirill A . Shutemov" <kirill@shutemov.name>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2022-05-13 03:22:52 +00:00
|
|
|
config PTE_MARKER
|
2022-05-13 03:22:56 +00:00
|
|
|
bool
|
2022-05-13 03:22:52 +00:00
|
|
|
|
|
|
|
help
|
|
|
|
Allows creating marker PTEs for file-backed memory.
|
|
|
|
|
2022-05-13 03:22:52 +00:00
|
|
|
config PTE_MARKER_UFFD_WP
|
2022-05-13 03:22:56 +00:00
|
|
|
bool "Userfaultfd write protection support for shmem/hugetlbfs"
|
|
|
|
default y
|
|
|
|
depends on HAVE_ARCH_USERFAULTFD_WP
|
|
|
|
select PTE_MARKER
|
2022-05-13 03:22:52 +00:00
|
|
|
|
|
|
|
help
|
|
|
|
Allows creating marker PTEs for userfaultfd write protection
|
|
|
|
purposes. It is required to enable userfaultfd write protection on
|
|
|
|
file-backed memory types like shmem and hugetlbfs.
|
|
|
|
|
mm: introduce Data Access MONitor (DAMON)
Patch series "Introduce Data Access MONitor (DAMON)", v34.
Introduction
============
DAMON is a data access monitoring framework for the Linux kernel. The
core mechanisms of DAMON called 'region based sampling' and 'adaptive
regions adjustment' (refer to 'mechanisms.rst' in the 11th patch of this
patchset for the detail) make it
- accurate (The monitored information is useful for DRAM level memory
management. It might not be appropriate for cache-level accuracy,
though.),
- light-weight (The monitoring overhead is low enough to be applied
online while making no impact on the performance of the target
workloads.), and
- scalable (the upper-bound of the instrumentation overhead is
controllable regardless of the size of target workloads.).
Using this framework, therefore, several memory management mechanisms such
as reclamation and THP can be optimized to be aware of real data access
patterns. Experimental access-pattern-aware memory management
optimization works that incur high instrumentation overhead will be
able to have another try.
Though DAMON is for kernel subsystems, it can be easily exposed to the
user space by writing a DAMON-wrapper kernel subsystem. Then, user space
users who have some special workloads will be able to write personalized
tools or applications for deeper understanding and specialized
optimizations of their systems.
DAMON is also merged in two public Amazon Linux kernel trees that are based on
v5.4.y[1] and v5.10.y[2].
[1] https://github.com/amazonlinux/linux/tree/amazon-5.4.y/master/mm/damon
[2] https://github.com/amazonlinux/linux/tree/amazon-5.10.y/master/mm/damon
The userspace tool[1] is available, released under GPLv2, and actively
being maintained. I am also planning to implement another basic user
interface in perf[2]. Also, the basic test suite for DAMON is available
under GPLv2[3].
[1] https://github.com/awslabs/damo
[2] https://lore.kernel.org/linux-mm/20210107120729.22328-1-sjpark@amazon.com/
[3] https://github.com/awslabs/damon-tests
Long-term Plan
--------------
DAMON is a part of a project called Data Access-aware Operating System
(DAOS). As the name implies, I want to improve the performance and
efficiency of systems using fine-grained data access patterns. The
optimizations are for both kernel and user spaces. I will therefore
modify or create kernel subsystems, export some of those to user space and
implement user space library / tools. Below shows the layers and
components for the project.
---------------------------------------------------------------------------
Primitives: PTE Accessed bit, PG_idle, rmap, (Intel CMT), ...
Framework: DAMON
Features: DAMOS, virtual addr, physical addr, ...
Applications: DAMON-debugfs, (DARC), ...
^^^^^^^^^^^^^^^^^^^^^^^ KERNEL SPACE ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Raw Interface: debugfs, (sysfs), (damonfs), tracepoints, (sys_damon), ...
vvvvvvvvvvvvvvvvvvvvvvv USER SPACE vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
Library: (libdamon), ...
Tools: DAMO, (perf), ...
---------------------------------------------------------------------------
The components in parentheses or marked as '...' are not implemented yet
but are in the future plan. IOW, those are the TODO tasks of the DAOS
project.
For more detail, please refer to the plans:
https://lore.kernel.org/linux-mm/20201202082731.24828-1-sjpark@amazon.com/
Evaluations
===========
We evaluated DAMON's overhead, monitoring quality and usefulness using 24
realistic workloads on my QEMU/KVM based virtual machine running a kernel
to which the v24 DAMON patchset is applied.
DAMON is lightweight. It increases system memory usage by 0.39% and slows
target workloads down by 1.16%.
DAMON is accurate and useful for memory management optimizations. An
experimental DAMON-based operation scheme for THP, namely 'ethp', removes
76.15% of THP memory overheads while preserving 51.25% of THP speedup.
Another experimental DAMON-based 'proactive reclamation' implementation,
'prcl', reduces 93.38% of resident sets and 23.63% of system memory
footprint while incurring only 1.22% runtime overhead in the best case
(parsec3/freqmine).
NOTE that the experimental THP optimization and proactive reclamation are
not for production but are only proofs of concept.
Please refer to the official document[1] or "Documentation/admin-guide/mm:
Add a document for DAMON" patch in this patchset for detailed evaluation
setup and results.
[1] https://damonitor.github.io/doc/html/latest-damon/admin-guide/mm/damon/eval.html
Real-world User Story
=====================
In summary, DAMON has been used on production systems and has proved its usefulness.
DAMON as a profiler
-------------------
We analyzed the characteristics of large scale production systems of our
customers using DAMON. The systems utilize 70GB of DRAM and 36 CPUs. From
this, we were able to find the interesting things below.
There were obviously different access patterns under the idle workload
and the active workload. Under the idle workload, large memory regions
were accessed with low frequency, while the active workload accessed small
memory regions with high frequency.
DAMON found a 7GB memory region showing obviously high access
frequency under the active workload. We believe this is the
performance-effective working set and needs to be protected.
There was a 4KB memory region showing the highest access frequency under
not only the active but also the idle workload. We think this must be
something like a hottest code section that should never be paged out.
For this analysis, DAMON used only 0.3-1% of a single CPU's time. Because
we used recording-based analysis, it consumed about 3-12 MB of disk space
per 20 minutes. This is only a small amount of disk space, but we can
further reduce the disk usage by using non-recording-based DAMON features.
I'd like to argue that only DAMON can do such detailed analysis (finding
the hottest 4KB region in 70GB of memory) with such light overhead.
DAMON as a system optimization tool
-----------------------------------
We also found below potential performance problems on the systems and made
DAMON-based solutions.
The system doesn't want to make the workload suffer from page reclamation
and thus it utilizes enough DRAM but no swap device. However, we found the
system is actively reclaiming file-backed pages, because the system has
intensive file IO. The file IO turned out not to be performance critical
for the workload, but the customer wanted to ensure that performance
critical file-backed pages such as code sections are not mistakenly
evicted.
Using direct IO or `mlock()` would be a straightforward solution, but
modifying the user space code is not easy for the customer. Alternatively,
we could use a DAMON-based operation scheme[1]. By using it, we can ask
DAMON to track the access frequency of each region and make a
'process_madvise(MADV_WILLNEED)'[2] call for regions having a specific
size and access frequency over a time interval.
We also found the system has a high number of TLB misses. We tried the
'always' THP policy and it greatly reduced TLB misses, but page
reclamation also became more frequent due to memory bloat caused by
THP-internal fragmentation. We could try another DAMON-based operation
scheme that applies 'MADV_HUGEPAGE' to memory regions having >=2MB size
and high access frequency, while applying 'MADV_NOHUGEPAGE' to regions
having <2MB size and low access frequency.
We do not own the systems so we only reported the analysis results and
possible optimization solutions to the customers. The customers were
satisfied with the analysis results and promised to try the optimization
guides.
[1] https://lore.kernel.org/linux-mm/20201006123931.5847-1-sjpark@amazon.com/
[2] https://lore.kernel.org/linux-api/20200622192900.22757-4-minchan@kernel.org/
Comparison with Idle Page Tracking
==================================
Idle Page Tracking allows users to set and read idleness of pages using a
bitmap file which represents each page with each bit of the file. One
recommended usage of it is working set size detection, which users can do
by the following steps (a rough sketch of this flow follows the list):
1. find the PFN of each page of the workloads of interest,
2. set all the pages as idle by doing writes to the bitmap file,
3. wait until the workload accesses its working set, and
4. read the idleness of the pages again and count the pages that became not idle.
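The sketch below walks that flow for a single page frame, assuming root
access and the standard page_idle bitmap layout of 64 pages per 8-byte
word; the PFN value is a placeholder one would obtain from
/proc/pid/pagemap, and error handling is trimmed:
  #include <fcntl.h>
  #include <stdint.h>
  #include <sys/types.h>
  #include <unistd.h>
  int main(void)
  {
          unsigned long pfn = 0x12345;            /* placeholder PFN */
          off_t off = (pfn / 64) * 8;             /* word holding this PFN's bit */
          uint64_t word = 1ULL << (pfn % 64);
          int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
          if (fd < 0)
                  return 1;
          pwrite(fd, &word, sizeof(word), off);   /* 2. mark the page idle */
          sleep(10);                              /* 3. let the workload run */
          pread(fd, &word, sizeof(word), off);    /* 4. read idleness back */
          close(fd);
          /* bit still set => page stayed idle, i.e. not part of the working set */
          return !(word & (1ULL << (pfn % 64)));
  }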
NOTE: While Idle Page Tracking is for user space users, DAMON is primarily
designed for kernel subsystems though it can easily be exposed to user
space. Hence, this section only assumes such user space use of DAMON.
For what use cases Idle Page Tracking would be better?
------------------------------------------------------
1. Flexible usecases other than hotness monitoring.
Because Idle Page Tracking allows users to control the primitive (Page
idleness) by themselves, Idle Page Tracking users can do anything they
want. Meanwhile, DAMON is primarily designed to monitor the hotness of
each memory region. For this, DAMON asks users to provide a sampling
interval and an aggregation interval. For that reason, there could be some
use cases where using Idle Page Tracking is simpler.
2. Physical memory monitoring.
Idle Page Tracking receives a PFN range as input, so it natively supports
physical memory monitoring.
DAMON is designed to be extensible for multiple address spaces and use
cases by implementing and using primitives for the given use case.
Therefore, by theory, DAMON has no limitation in the type of target
address space as long as primitives for the given address space exists.
However, the default primitives introduced by this patchset supports only
virtual address spaces.
Therefore, for physical memory monitoring, you should implement your own
primitives and use them, or simply use Idle Page Tracking.
Nonetheless, an RFC patchset[1] for the physical memory address space
primitives is already available. It also supports user memory, the same
as Idle Page Tracking.
[1] https://lore.kernel.org/linux-mm/20200831104730.28970-1-sjpark@amazon.com/
For what use cases DAMON is better?
-----------------------------------
1. Hotness Monitoring.
Idle Page Tracking lets users know only whether a page frame was accessed
or not. For a hotness check, the user should write more code and use more
memory. DAMON does that by itself.
2. Low Monitoring Overhead
DAMON receives a user's monitoring request in one step and then provides
the results. So, roughly speaking, DAMON requires only O(1) user/kernel
context switches.
In the case of Idle Page Tracking, however, because the interface receives
contiguous page frames, the number of user/kernel context switches
increases as the monitoring target becomes complex and huge. As a result,
the context switch overhead could be non-negligible.
Moreover, DAMON was born to handle the monitoring overhead. Because
the core mechanism is purely logical, Idle Page Tracking users might be
able to implement the mechanism on their own, but it would be time
consuming and the user/kernel context switching would still be more
frequent than that of DAMON. Also, the kernel subsystems cannot use the
logic in this case.
3. Page granularity working set size detection.
Until v22 of this patchset, this was categorized as the thing Idle Page
Tracking could do better, because DAMON basically maintains additional
metadata for each of the monitoring target regions. So, in the page
granularity working set size detection use case, DAMON would incur (number
of monitoring target pages * size of metadata) memory overhead. The size
of a single metadata item is about 54 bytes, so assuming 4KB pages, about
1.3% of the monitoring target memory will be additionally used.
All essential metadata for Idle Page Tracking are embedded in 'struct
page' and page table entries. Therefore, in this use case, only one
counter variable for working set size accounting is required if Idle Page
Tracking is used.
There are more details to consider, but roughly speaking, this is true in
most cases.
However, the situation changed from v23. Now DAMON supports arbitrary
types of monitoring targets, which don't use the metadata. Using that,
DAMON can do working set size detection with no additional space
overhead and fewer user/kernel context switches. A first draft of the
implementation of monitoring primitives for this usage is available in a
DAMON development tree[1]. An RFC patchset for it based on this patchset
will also be available soon.
Since v24, the arbitrary type support is dropped from this patchset
because this patchset doesn't introduce real use of the type. You can
still get it from the DAMON development tree[2], though.
[1] https://github.com/sjp38/linux/tree/damon/pgidle_hack
[2] https://github.com/sjp38/linux/tree/damon/master
4. More future usecases
While Idle Page Tracking has tight coupling with base primitives (PG_Idle
and page table Accessed bits), DAMON is designed to be extensible for many
use cases and address spaces. If you need some special address type or
want to use special h/w access check primitives, you can write your own
primitives for that and configure DAMON to use those. Therefore, if your
use case could be changed a lot in future, using DAMON could be better.
Can I use both Idle Page Tracking and DAMON?
--------------------------------------------
Yes, though using them concurrently for overlapping memory regions could
result in interference with each other. Nevertheless, such a use case
would be rare or make no sense at all. Even in that case, the noise would
not be really significant. So, you can choose whatever you want depending
on the characteristics of your use cases.
More Information
================
We prepared a showcase web site[1] where you can get more information.
There are
- the official documentations[2],
- the heatmap format dynamic access pattern of various realistic workloads for
heap area[3], mmap()-ed area[4], and stack[5] area,
- the dynamic working set size distribution[6] and chronological working set
size changes[7], and
- the latest performance test results[8].
[1] https://damonitor.github.io/_index
[2] https://damonitor.github.io/doc/html/latest-damon
[3] https://damonitor.github.io/test/result/visual/latest/rec.heatmap.0.png.html
[4] https://damonitor.github.io/test/result/visual/latest/rec.heatmap.1.png.html
[5] https://damonitor.github.io/test/result/visual/latest/rec.heatmap.2.png.html
[6] https://damonitor.github.io/test/result/visual/latest/rec.wss_sz.png.html
[7] https://damonitor.github.io/test/result/visual/latest/rec.wss_time.png.html
[8] https://damonitor.github.io/test/result/perf/latest/html/index.html
Baseline and Complete Git Trees
===============================
The patches are based on the latest -mm tree, specifically
v5.14-rc1-mmots-2021-07-15-18-47 of https://github.com/hnaz/linux-mm. You can
also clone the complete git tree:
$ git clone git://github.com/sjp38/linux -b damon/patches/v34
The web is also available:
https://github.com/sjp38/linux/releases/tag/damon/patches/v34
Development Trees
-----------------
There are a couple of trees for entire DAMON patchset series and features
for future release.
- For latest release: https://github.com/sjp38/linux/tree/damon/master
- For next release: https://github.com/sjp38/linux/tree/damon/next
Long-term Support Trees
-----------------------
For people who want to test DAMON but are using LTS kernels, there is
another couple of trees, based on the two latest LTS kernels respectively
and containing the 'damon/master' backports.
- For v5.4.y: https://github.com/sjp38/linux/tree/damon/for-v5.4.y
- For v5.10.y: https://github.com/sjp38/linux/tree/damon/for-v5.10.y
Amazon Linux Kernel Trees
-------------------------
DAMON is also merged in two public Amazon Linux kernel trees that are based on
v5.4.y[1] and v5.10.y[2].
[1] https://github.com/amazonlinux/linux/tree/amazon-5.4.y/master/mm/damon
[2] https://github.com/amazonlinux/linux/tree/amazon-5.10.y/master/mm/damon
Git Tree for Diff of Patches
============================
For easy review of diff between different versions of each patch, I
prepared a git tree containing all versions of the DAMON patchset series:
https://github.com/sjp38/damon-patches
You can clone it and use 'diff' for easy review of changes between
different versions of the patchset. For example:
$ git clone https://github.com/sjp38/damon-patches && cd damon-patches
$ diff -u damon/v33 damon/v34
Sequence Of Patches
===================
The first three patches implement the core logic of DAMON. The 1st patch
introduces basic sampling based hotness monitoring for arbitrary types of
targets. Following two patches implement the core mechanisms for control
of overhead and accuracy, namely regions based sampling (patch 2) and
adaptive regions adjustment (patch 3).
Now the essential parts of DAMON are complete, but it cannot work unless
someone provides monitoring primitives for a specific use case. The
following two patches make it just work for virtual address space
monitoring. The 4th patch makes 'PG_idle' usable by DAMON, and the
5th patch implements the virtual memory address space specific monitoring
primitives using page table Accessed bits and the 'PG_idle' page flag.
Now DAMON just works for virtual address space monitoring via the kernel
space API. To let user space users use DAMON, the following four
patches add interfaces for them. The 6th patch adds a tracepoint for
monitoring results. The 7th patch implements a DAMON application kernel
module, namely damon-dbgfs, that simply wraps DAMON and exposes the DAMON
interface to user space via the debugfs interface. The 8th patch
further exports the pid of the monitoring thread (kdamond) to user space
for easier cpu usage accounting, and the 9th patch makes the debugfs
interface support multiple contexts.
Three patches for maintainability follow. The 10th patch adds
documentation for both user space and kernel space. The 11th
patch provides unit tests (based on kunit) while the 12th patch adds
user space tests (based on kselftest).
Finally, the last patch (13th) updates the MAINTAINERS file.
This patch (of 13):
DAMON is a data access monitoring framework for the Linux kernel. The
core mechanisms of DAMON make it
- accurate (the monitoring output is useful enough for DRAM level
performance-centric memory management; it might be inappropriate for
CPU cache levels, though),
- light-weight (the monitoring overhead is normally low enough to be
applied online), and
- scalable (the upper-bound of the overhead is in constant range
regardless of the size of target workloads).
Using this framework, hence, we can easily write efficient kernel space
data access monitoring applications. For example, the kernel's memory
management mechanisms can make advanced decisions using this.
Experimental data-access-aware optimization works that incur high
access monitoring overhead could again be implemented on top of this.
Due to its simple and flexible interface, providing a user space interface
would also be easy. Then, user space users who have some special
workloads can write personalized applications for better understanding and
optimizations of their workloads and systems.
===
Nevertheless, this commit defines and implements only the basic access
check part, without the overhead/accuracy handling core logic. The basic
access check is as below.
The output of DAMON says what memory regions are how frequently accessed
for a given duration. The resolution of the access frequency is
controlled by setting ``sampling interval`` and ``aggregation interval``.
In detail, DAMON checks access to each page per ``sampling interval`` and
aggregates the results. In other words, it counts the number of accesses
to each region. After each ``aggregation interval`` passes, DAMON calls
callback functions that were previously registered by users so that users
can read the aggregated results, and then clears the results. This can be
described in the simple pseudo-code below::
    init()
    while monitoring_on:
        for page in monitoring_target:
            if accessed(page):
                nr_accesses[page] += 1
        if time() % aggregation_interval == 0:
            for callback in user_registered_callbacks:
                callback(monitoring_target, nr_accesses)
            for page in monitoring_target:
                nr_accesses[page] = 0
        if time() % update_interval == 0:
            update()
        sleep(sampling interval)
The target regions are constructed at the beginning of the monitoring and
updated after each ``regions_update_interval``, because the target regions
could be dynamically changed (e.g., mmap() or memory hotplug). The
monitoring overhead of this mechanism will arbitrarily increase as the
size of the target workload grows.
The basic monitoring primitives for the actual access check and dynamic
target regions construction aren't in the core part of DAMON. Instead, it
allows users to implement their own primitives that are optimized for
their use case and configure DAMON to use those. In other words, users
cannot use the current version of DAMON without some additional work.
Following commits will implement the core mechanisms for the
overhead-accuracy control and default primitives implementations.
Link: https://lkml.kernel.org/r/20210716081449.22187-1-sj38.park@gmail.com
Link: https://lkml.kernel.org/r/20210716081449.22187-2-sj38.park@gmail.com
Signed-off-by: SeongJae Park <sjpark@amazon.de>
Reviewed-by: Leonard Foerster <foersleo@amazon.de>
Reviewed-by: Fernand Sieber <sieberf@amazon.com>
Acked-by: Shakeel Butt <shakeelb@google.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Amit Shah <amit@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Woodhouse <dwmw@amazon.com>
Cc: Marco Elver <elver@google.com>
Cc: Fan Du <fan.du@intel.com>
Cc: Greg Kroah-Hartman <greg@kroah.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Joe Perches <joe@perches.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Maximilian Heyne <mheyne@amazon.de>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Brendan Higgins <brendanhiggins@google.com>
Cc: Markus Boehme <markubo@amazon.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2021-09-08 02:56:28 +00:00
|
|
|
source "mm/damon/Kconfig"
|
|
|
|
|
2018-07-31 11:39:35 +00:00
|
|
|
endmenu
|