/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MM_SWAP_H
#define _MM_SWAP_H

struct mempolicy;

#ifdef CONFIG_SWAP
#include <linux/swapops.h> /* for swp_offset */
#include <linux/blk_types.h> /* for bio_end_io_t */

/* linux/mm/page_io.c */
int sio_pool_init(void);
struct swap_iocb;
void swap_read_folio(struct folio *folio, struct swap_iocb **plug);
void __swap_read_unplug(struct swap_iocb *plug);
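/*
 * Only take the call into __swap_read_unplug() when a read was actually
 * plugged; the common case is a NULL plug.
 */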
static inline void swap_read_unplug(struct swap_iocb *plug)
{
	if (unlikely(plug))
		__swap_read_unplug(plug);
}
void swap_write_unplug(struct swap_iocb *sio);
int swap_writepage(struct page *page, struct writeback_control *wbc);
void __swap_writepage(struct folio *folio, struct writeback_control *wbc);

/* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */
#define SWAP_ADDRESS_SPACE_SHIFT	14
#define SWAP_ADDRESS_SPACE_PAGES	(1 << SWAP_ADDRESS_SPACE_SHIFT)
#define SWAP_ADDRESS_SPACE_MASK		(SWAP_ADDRESS_SPACE_PAGES - 1)
extern struct address_space *swapper_spaces[];
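/*
 * swapper_spaces[] is indexed by swap type; the entry's offset, shifted
 * down by SWAP_ADDRESS_SPACE_SHIFT, then selects the address_space of
 * its own 64M chunk.
 */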
#define swap_address_space(entry)			    \
	(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
		>> SWAP_ADDRESS_SPACE_SHIFT])

/*
 * Return the swap device position of the swap entry.
 */
static inline loff_t swap_dev_pos(swp_entry_t entry)
{
	return ((loff_t)swp_offset(entry)) << PAGE_SHIFT;
}

/*
 * Return the swap cache index of the swap entry.
 */
static inline pgoff_t swap_cache_index(swp_entry_t entry)
{
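	/*
	 * The build-time check below ensures SWAP_ADDRESS_SPACE_MASK covers
	 * only swap-offset bits, so the masking merely truncates the offset
	 * to its position within one 64M address space.
	 */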
	BUILD_BUG_ON((SWP_OFFSET_MASK | SWAP_ADDRESS_SPACE_MASK) != SWP_OFFSET_MASK);
	return swp_offset(entry) & SWAP_ADDRESS_SPACE_MASK;
}
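
/*
 * Typical pairing (sketch): swap cache lookups combine the helpers above,
 * roughly:
 *	folio = filemap_get_folio(swap_address_space(entry),
 *				  swap_cache_index(entry));
 */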

void show_swap_cache_info(void);
bool add_to_swap(struct folio *folio);
void *get_shadow_from_swap_cache(swp_entry_t entry);
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
		      gfp_t gfp, void **shadowp);
void __delete_from_swap_cache(struct folio *folio,
			      swp_entry_t entry, void *shadow);
void delete_from_swap_cache(struct folio *folio);
void clear_shadow_from_swap_cache(int type, unsigned long begin,
				  unsigned long end);
void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr);
struct folio *swap_cache_get_folio(swp_entry_t entry,
		struct vm_area_struct *vma, unsigned long addr);
struct folio *filemap_get_incore_folio(struct address_space *mapping,
		pgoff_t index);

struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma, unsigned long addr,
		struct swap_iocb **plug);
struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
		struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
		bool skip_if_exists);
struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
		struct mempolicy *mpol, pgoff_t ilx);
struct folio *swapin_readahead(swp_entry_t entry, gfp_t flag,
			       struct vm_fault *vmf);

static inline unsigned int folio_swap_flags(struct folio *folio)
{
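	/* Flags of the swap device holding this folio's swap entry. */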
	return swp_swap_info(folio->swap)->flags;
}
#else /* CONFIG_SWAP */
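/* Stub versions so that callers need not be wrapped in #ifdef CONFIG_SWAP. */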
struct swap_iocb;
static inline void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
{
}
static inline void swap_write_unplug(struct swap_iocb *sio)
{
}

static inline struct address_space *swap_address_space(swp_entry_t entry)
{
	return NULL;
}

static inline pgoff_t swap_cache_index(swp_entry_t entry)
{
	return 0;
}

static inline void show_swap_cache_info(void)
{
}

static inline struct folio *swap_cluster_readahead(swp_entry_t entry,
			gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx)
{
	return NULL;
}

static inline struct folio *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_fault *vmf)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr)
{
}

static inline struct folio *swap_cache_get_folio(swp_entry_t entry,
		struct vm_area_struct *vma, unsigned long addr)
{
	return NULL;
}

static inline
struct folio *filemap_get_incore_folio(struct address_space *mapping,
			pgoff_t index)
{
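	/* No swap cache without CONFIG_SWAP: plain page cache lookup. */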
	return filemap_get_folio(mapping, index);
}

static inline bool add_to_swap(struct folio *folio)
{
	return false;
}

static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	return NULL;
}

static inline int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
				gfp_t gfp_mask, void **shadowp)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct folio *folio,
				swp_entry_t entry, void *shadow)
{
}

static inline void delete_from_swap_cache(struct folio *folio)
{
}

static inline void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
}

static inline unsigned int folio_swap_flags(struct folio *folio)
{
	return 0;
}

#endif /* CONFIG_SWAP */
#endif /* _MM_SWAP_H */