mm: attempt to batch free swap entries for zap_pte_range()

Zhiguo reported that swap release could be a serious bottleneck during
process exits[1].  With mTHP, we have the opportunity to batch free swap
entries.

Thanks to the work of Chris and Kairui[2], I was able to achieve this
optimization with minimal code changes.

If swap_count is 1, which is likely the case since most anon memory is
private, we can free all contiguous swap slots at once.
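
To illustrate the idea, here is a minimal userspace sketch of the
fast-path check (CLUSTER_SIZE, swap_map[] and batch_free() are made-up
stand-ins for SWAPFILE_CLUSTER, si->swap_map and the new
__swap_entries_free(); this is not the kernel code): rather than
dropping entries one by one, verify up front that the whole run of
slots sits in one cluster and every slot has count 1, then release the
run in a single pass:

 #include <stdbool.h>
 #include <stdio.h>

 #define CLUSTER_SIZE 512                /* stand-in for SWAPFILE_CLUSTER */

 static unsigned char swap_map[4096];    /* stand-in for si->swap_map */

 /* Fast path: legal only when the range stays within one cluster and
  * every slot is mapped exactly once; otherwise the caller must fall
  * back to freeing entry by entry. */
 static bool batch_free(unsigned long offset, int nr)
 {
        int i;

        if (nr <= 1 || nr > CLUSTER_SIZE - offset % CLUSTER_SIZE)
                return false;           /* crosses a cluster: fall back */
        for (i = 0; i < nr; i++)
                if (swap_map[offset + i] != 1)
                        return false;   /* shared or cached: fall back */
        for (i = 0; i < nr; i++)
                swap_map[offset + i] = 0;       /* one batched release */
        return true;
 }

 int main(void)
 {
        unsigned long offset = 100;
        int i;

        for (i = 0; i < 16; i++)
                swap_map[offset + i] = 1;       /* 16 private slots */
        printf("batched: %d\n", batch_free(offset, 16));        /* prints 1 */
        return 0;
 }

The real __swap_entries_free() in the diff below additionally has to
hold the cluster lock, leave SWAP_HAS_CACHE behind while a swap cache
folio still exists, and invalidate any zswap copies.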

I ran the test program below to measure munmap bandwidth, using zRAM
as the swap backend and 64KiB mTHP:

 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include <sys/mman.h>
 #include <sys/time.h>

 unsigned long long tv_to_ms(struct timeval tv)
 {
        return tv.tv_sec * 1000 + tv.tv_usec / 1000;
 }

 int main(void)
 {
        struct timeval tv_b, tv_e;
 #define SIZE (1024*1024*1024)
        void *p = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
                perror("fail to get memory");
                exit(-1);
        }

        madvise(p, SIZE, MADV_HUGEPAGE);
        memset(p, 0x11, SIZE); /* write to get mem */

        madvise(p, SIZE, MADV_PAGEOUT);

        gettimeofday(&tv_b, NULL);
        munmap(p, SIZE);
        gettimeofday(&tv_e, NULL);

        printf("munmap in bandwidth: %ld bytes/ms\n",
                        SIZE/(tv_to_ms(tv_e) - tv_to_ms(tv_b)));
 }
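
For reference, the program was built with a plain gcc invocation and
run with zRAM configured as the swap device and 64KiB mTHP enabled,
typically via /sys/kernel/mm/transparent_hugepage/hugepages-64kB/enabled
(the exact sysfs path depends on kernel version and config).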

The results are as below (munmap bandwidth in bytes/ms):
                mm-unstable  mm-unstable-with-patch
   round1       21053761      63161283
   round2       21053761      63161283
   round3       21053761      63161283
   round4       20648881      67108864
   round5       20648881      67108864

munmap bandwidth becomes about 3X faster.

[1] https://lore.kernel.org/linux-mm/20240731133318.527-1-justinjiang@vivo.com/
[2] https://lore.kernel.org/linux-mm/20240730-swap-allocator-v5-0-cb9c148b9297@kernel.org/

[v-songbaohua@oppo.com: check all swaps belong to same swap_cgroup in swap_pte_batch()]
  Link: https://lkml.kernel.org/r/20240815215308.55233-1-21cnbao@gmail.com
[hughd@google.com: add mem_cgroup_disabled() check]
  Link: https://lkml.kernel.org/r/33f34a88-0130-5444-9b84-93198eeb50e7@google.com
[21cnbao@gmail.com: add missing zswap_invalidate()]
  Link: https://lkml.kernel.org/r/20240821054921.43468-1-21cnbao@gmail.com
Link: https://lkml.kernel.org/r/20240807215859.57491-3-21cnbao@gmail.com
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Chris Li <chrisl@kernel.org>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Kalesh Singh <kaleshsingh@google.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

--- a/mm/internal.h
+++ b/mm/internal.h
@@ -15,6 +15,7 @@
 #include <linux/rmap.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
+#include <linux/swap_cgroup.h>
 #include <linux/tracepoint-defs.h>
 
 /* Internal core VMA manipulation functions. */
@@ -275,18 +276,22 @@ static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
 {
 	pte_t expected_pte = pte_next_swp_offset(pte);
 	const pte_t *end_ptep = start_ptep + max_nr;
+	swp_entry_t entry = pte_to_swp_entry(pte);
 	pte_t *ptep = start_ptep + 1;
+	unsigned short cgroup_id;
 
 	VM_WARN_ON(max_nr < 1);
 	VM_WARN_ON(!is_swap_pte(pte));
-	VM_WARN_ON(non_swap_entry(pte_to_swp_entry(pte)));
+	VM_WARN_ON(non_swap_entry(entry));
 
+	cgroup_id = lookup_swap_cgroup_id(entry);
 	while (ptep < end_ptep) {
 		pte = ptep_get(ptep);
 
 		if (!pte_same(pte, expected_pte))
 			break;
-
+		if (lookup_swap_cgroup_id(pte_to_swp_entry(pte)) != cgroup_id)
+			break;
 		expected_pte = pte_next_swp_offset(expected_pte);
 		ptep++;
 	}

--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -161,6 +161,8 @@ unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id,
  */
 unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
 {
+	if (mem_cgroup_disabled())
+		return 0;
 	return lookup_swap_cgroup(ent, NULL)->id;
 }
 

--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -156,6 +156,25 @@ static bool swap_is_has_cache(struct swap_info_struct *si,
 	return true;
 }
 
+static bool swap_is_last_map(struct swap_info_struct *si,
+		unsigned long offset, int nr_pages, bool *has_cache)
+{
+	unsigned char *map = si->swap_map + offset;
+	unsigned char *map_end = map + nr_pages;
+	unsigned char count = *map;
+
+	if (swap_count(count) != 1)
+		return false;
+
+	while (++map < map_end) {
+		if (*map != count)
+			return false;
+	}
+
+	*has_cache = !!(count & SWAP_HAS_CACHE);
+	return true;
+}
+
 /*
  * returns number of pages in the folio that backs the swap entry. If positive,
  * the folio was reclaimed. If negative, the folio was not reclaimed. If 0, no
@@ -1469,6 +1488,53 @@ static unsigned char __swap_entry_free(struct swap_info_struct *si,
 	return usage;
 }
 
+static bool __swap_entries_free(struct swap_info_struct *si,
+		swp_entry_t entry, int nr)
+{
+	unsigned long offset = swp_offset(entry);
+	unsigned int type = swp_type(entry);
+	struct swap_cluster_info *ci;
+	bool has_cache = false;
+	unsigned char count;
+	int i;
+
+	if (nr <= 1 || swap_count(data_race(si->swap_map[offset])) != 1)
+		goto fallback;
+	/* cross into another cluster */
+	if (nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER)
+		goto fallback;
+
+	ci = lock_cluster_or_swap_info(si, offset);
+	if (!swap_is_last_map(si, offset, nr, &has_cache)) {
+		unlock_cluster_or_swap_info(si, ci);
+		goto fallback;
+	}
+	for (i = 0; i < nr; i++)
+		WRITE_ONCE(si->swap_map[offset + i], SWAP_HAS_CACHE);
+	unlock_cluster_or_swap_info(si, ci);
+
+	if (!has_cache) {
+		for (i = 0; i < nr; i++)
+			zswap_invalidate(swp_entry(si->type, offset + i));
+		spin_lock(&si->lock);
+		swap_entry_range_free(si, entry, nr);
+		spin_unlock(&si->lock);
+	}
+	return has_cache;
+
+fallback:
+	for (i = 0; i < nr; i++) {
+		if (data_race(si->swap_map[offset + i])) {
+			count = __swap_entry_free(si, swp_entry(type, offset + i));
+			if (count == SWAP_HAS_CACHE)
+				has_cache = true;
+		} else {
+			WARN_ON_ONCE(1);
+		}
+	}
+	return has_cache;
+}
+
 /*
  * Drop the last HAS_CACHE flag of swap entries, caller have to
  * ensure all entries belong to the same cgroup.
@@ -1792,11 +1858,9 @@ void free_swap_and_cache_nr(swp_entry_t entry, int nr)
 {
 	const unsigned long start_offset = swp_offset(entry);
 	const unsigned long end_offset = start_offset + nr;
-	unsigned int type = swp_type(entry);
 	struct swap_info_struct *si;
 	bool any_only_cache = false;
 	unsigned long offset;
-	unsigned char count;
 
 	if (non_swap_entry(entry))
 		return;
@@ -1811,15 +1875,7 @@ void free_swap_and_cache_nr(swp_entry_t entry, int nr)
 	/*
 	 * First free all entries in the range.
 	 */
-	for (offset = start_offset; offset < end_offset; offset++) {
-		if (data_race(si->swap_map[offset])) {
-			count = __swap_entry_free(si, swp_entry(type, offset));
-			if (count == SWAP_HAS_CACHE)
-				any_only_cache = true;
-		} else {
-			WARN_ON_ONCE(1);
-		}
-	}
+	any_only_cache = __swap_entries_free(si, entry, nr);
 
 	/*
 	 * Short-circuit the below loop if none of the entries had their