selftests/vm: cow: basic COW tests for non-anonymous pages
Let's add basic tests for COW with non-anonymous pages in private mappings: write access should properly trigger COW and result in the private changes not being visible through other page mappings.

Especially, add tests for:
* Zeropage
* Huge zeropage
* Ordinary pagecache pages via memfd and tmpfile()
* Hugetlb pages via memfd

Fortunately, all tests pass.

Link: https://lkml.kernel.org/r/20221116102659.70287-3-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit f8664f3c4a
parent 7aca5ca154
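For illustration only (not part of the patch): a minimal standalone sketch of the property the new tests check, written against plain Linux APIs. It assumes memfd_create() and fallocate() are available; the file name "cow-sketch" and the use of assert() are just for the example. A write through a MAP_PRIVATE mapping must trigger COW, so a MAP_SHARED mapping of the same pagecache page keeps reading the original zeroes.

#define _GNU_SOURCE
#include <assert.h>
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t pagesize = getpagesize();
	int fd = memfd_create("cow-sketch", 0);
	char *mem, *smem;

	assert(fd >= 0);
	/* One pagecache page, filled with zeroes. */
	assert(!fallocate(fd, 0, 0, pagesize));

	/* Writable private (COW) mapping and read-only shared mapping of the same page. */
	mem = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	smem = mmap(NULL, pagesize, PROT_READ, MAP_SHARED, fd, 0);
	assert(mem != MAP_FAILED && smem != MAP_FAILED);

	/* The write must COW the page in the private mapping ... */
	memset(mem, 0xff, pagesize);
	/* ... so the shared mapping still sees the original zeroes. */
	assert(smem[0] == 0 && mem[0] == (char)0xff);
	return 0;
}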
@@ -19,6 +19,7 @@
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/wait.h>
#include <linux/memfd.h>

#include "local_config.h"
#ifdef LOCAL_CONFIG_HAVE_LIBURING
@@ -35,6 +36,7 @@ static size_t thpsize;
static int nr_hugetlbsizes;
static size_t hugetlbsizes[10];
static int gup_fd;
static bool has_huge_zeropage;

static void detect_thpsize(void)
{
@@ -64,6 +66,31 @@ static void detect_thpsize(void)
	close(fd);
}

static void detect_huge_zeropage(void)
{
	int fd = open("/sys/kernel/mm/transparent_hugepage/use_zero_page",
		      O_RDONLY);
	size_t enabled = 0;
	char buf[15];
	int ret;

	if (fd < 0)
		return;

	ret = pread(fd, buf, sizeof(buf), 0);
	if (ret > 0 && ret < sizeof(buf)) {
		buf[ret] = 0;

		enabled = strtoul(buf, NULL, 10);
		if (enabled == 1) {
			has_huge_zeropage = true;
			ksft_print_msg("[INFO] huge zeropage is enabled\n");
		}
	}

	close(fd);
}

static void detect_hugetlbsizes(void)
{
	DIR *dir = opendir("/sys/kernel/mm/hugepages/");
@@ -1148,6 +1175,312 @@ static int tests_per_anon_test_case(void)
	return tests;
}

typedef void (*non_anon_test_fn)(char *mem, const char *smem, size_t size);

static void test_cow(char *mem, const char *smem, size_t size)
{
	char *old = malloc(size);

	/* Backup the original content. */
	memcpy(old, smem, size);

	/* Modify the page. */
	memset(mem, 0xff, size);

	/* See if we still read the old values via the other mapping. */
	ksft_test_result(!memcmp(smem, old, size),
			 "Other mapping not modified\n");
	free(old);
}

static void run_with_zeropage(non_anon_test_fn fn, const char *desc)
{
	char *mem, *smem, tmp;

	ksft_print_msg("[RUN] %s ... with shared zeropage\n", desc);

	mem = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANON, -1, 0);
	if (mem == MAP_FAILED) {
		ksft_test_result_fail("mmap() failed\n");
		return;
	}

	smem = mmap(NULL, pagesize, PROT_READ, MAP_PRIVATE | MAP_ANON, -1, 0);
	if (smem == MAP_FAILED) {
		ksft_test_result_fail("mmap() failed\n");
		goto munmap;
	}

	/* Read from the page to populate the shared zeropage. */
	tmp = *mem + *smem;
	asm volatile("" : "+r" (tmp));

	fn(mem, smem, pagesize);
munmap:
	munmap(mem, pagesize);
	if (smem != MAP_FAILED)
		munmap(smem, pagesize);
}
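A side note on the asm statement used throughout these helpers (an observation, not part of the patch): the empty inline asm with a "+r" operand is the usual GCC/Clang idiom for forcing a read the optimizer would otherwise be free to drop, so the access really populates the page. A minimal sketch of the idiom; force_read() is a hypothetical helper name:

/*
 * Force a load from *p that the compiler cannot optimize away: the empty asm
 * declares tmp as a register input/output, so the load feeding it must happen.
 */
static inline void force_read(const char *p)
{
	char tmp = *p;

	asm volatile("" : "+r" (tmp));
}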
static void run_with_huge_zeropage(non_anon_test_fn fn, const char *desc)
{
	char *mem, *smem, *mmap_mem, *mmap_smem, tmp;
	size_t mmap_size;
	int ret;

	ksft_print_msg("[RUN] %s ... with huge zeropage\n", desc);

	if (!has_huge_zeropage) {
		ksft_test_result_skip("Huge zeropage not enabled\n");
		return;
	}

	/* For alignment purposes, we need twice the thp size. */
	mmap_size = 2 * thpsize;
	mmap_mem = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (mmap_mem == MAP_FAILED) {
		ksft_test_result_fail("mmap() failed\n");
		return;
	}
	mmap_smem = mmap(NULL, mmap_size, PROT_READ,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (mmap_smem == MAP_FAILED) {
		ksft_test_result_fail("mmap() failed\n");
		goto munmap;
	}

	/* We need a THP-aligned memory area. */
	mem = (char *)(((uintptr_t)mmap_mem + thpsize) & ~(thpsize - 1));
	smem = (char *)(((uintptr_t)mmap_smem + thpsize) & ~(thpsize - 1));

	ret = madvise(mem, thpsize, MADV_HUGEPAGE);
	ret |= madvise(smem, thpsize, MADV_HUGEPAGE);
	if (ret) {
		ksft_test_result_fail("MADV_HUGEPAGE failed\n");
		goto munmap;
	}

	/*
	 * Read from the memory to populate the huge shared zeropage. Read from
	 * the first sub-page and test if we get another sub-page populated
	 * automatically.
	 */
	tmp = *mem + *smem;
	asm volatile("" : "+r" (tmp));
	if (!pagemap_is_populated(pagemap_fd, mem + pagesize) ||
	    !pagemap_is_populated(pagemap_fd, smem + pagesize)) {
		ksft_test_result_skip("Did not get THPs populated\n");
		goto munmap;
	}

	fn(mem, smem, thpsize);
munmap:
	munmap(mmap_mem, mmap_size);
	if (mmap_smem != MAP_FAILED)
		munmap(mmap_smem, mmap_size);
}
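A note on the alignment arithmetic above (not part of the patch): mapping 2 * thpsize guarantees that a thpsize-aligned window of thpsize bytes fits somewhere inside the mapping, and rounding the start address up to the next thpsize boundary finds it. A small sketch with a hypothetical helper name; assuming a 2 MiB THP size, an unaligned start of 0x7f00001ff000 rounds up to 0x7f0000200000.

#include <stddef.h>
#include <stdint.h>

/* Round addr up to the next thpsize boundary above it (thpsize is a power of two). */
static char *thp_align_up(char *addr, size_t thpsize)
{
	return (char *)(((uintptr_t)addr + thpsize) & ~(thpsize - 1));
}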
static void run_with_memfd(non_anon_test_fn fn, const char *desc)
{
	char *mem, *smem, tmp;
	int fd;

	ksft_print_msg("[RUN] %s ... with memfd\n", desc);

	fd = memfd_create("test", 0);
	if (fd < 0) {
		ksft_test_result_fail("memfd_create() failed\n");
		return;
	}

	/* File consists of a single page filled with zeroes. */
	if (fallocate(fd, 0, 0, pagesize)) {
		ksft_test_result_fail("fallocate() failed\n");
		goto close;
	}

	/* Create a private mapping of the memfd. */
	mem = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	if (mem == MAP_FAILED) {
		ksft_test_result_fail("mmap() failed\n");
		goto close;
	}
	smem = mmap(NULL, pagesize, PROT_READ, MAP_SHARED, fd, 0);
	if (smem == MAP_FAILED) {
		ksft_test_result_fail("mmap() failed\n");
		goto munmap;
	}

	/* Fault the page in. */
	tmp = *mem + *smem;
	asm volatile("" : "+r" (tmp));

	fn(mem, smem, pagesize);
munmap:
	munmap(mem, pagesize);
	if (smem != MAP_FAILED)
		munmap(smem, pagesize);
close:
	close(fd);
}
static void run_with_tmpfile(non_anon_test_fn fn, const char *desc)
{
	char *mem, *smem, tmp;
	FILE *file;
	int fd;

	ksft_print_msg("[RUN] %s ... with tmpfile\n", desc);

	file = tmpfile();
	if (!file) {
		ksft_test_result_fail("tmpfile() failed\n");
		return;
	}

	fd = fileno(file);
	if (fd < 0) {
		ksft_test_result_skip("fileno() failed\n");
		return;
	}

	/* File consists of a single page filled with zeroes. */
	if (fallocate(fd, 0, 0, pagesize)) {
		ksft_test_result_fail("fallocate() failed\n");
		goto close;
	}

	/* Create a private mapping of the tmpfile. */
	mem = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	if (mem == MAP_FAILED) {
		ksft_test_result_fail("mmap() failed\n");
		goto close;
	}
	smem = mmap(NULL, pagesize, PROT_READ, MAP_SHARED, fd, 0);
	if (smem == MAP_FAILED) {
		ksft_test_result_fail("mmap() failed\n");
		goto munmap;
	}

	/* Fault the page in. */
	tmp = *mem + *smem;
	asm volatile("" : "+r" (tmp));

	fn(mem, smem, pagesize);
munmap:
	munmap(mem, pagesize);
	if (smem != MAP_FAILED)
		munmap(smem, pagesize);
close:
	fclose(file);
}
static void run_with_memfd_hugetlb(non_anon_test_fn fn, const char *desc,
				   size_t hugetlbsize)
{
	int flags = MFD_HUGETLB;
	char *mem, *smem, tmp;
	int fd;

	ksft_print_msg("[RUN] %s ... with memfd hugetlb (%zu kB)\n", desc,
		       hugetlbsize / 1024);

	flags |= __builtin_ctzll(hugetlbsize) << MFD_HUGE_SHIFT;

	fd = memfd_create("test", flags);
	if (fd < 0) {
		ksft_test_result_skip("memfd_create() failed\n");
		return;
	}

	/* File consists of a single page filled with zeroes. */
	if (fallocate(fd, 0, 0, hugetlbsize)) {
		ksft_test_result_skip("need more free huge pages\n");
		goto close;
	}

	/* Create a private mapping of the memfd. */
	mem = mmap(NULL, hugetlbsize, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd,
		   0);
	if (mem == MAP_FAILED) {
		ksft_test_result_skip("need more free huge pages\n");
		goto close;
	}
	smem = mmap(NULL, hugetlbsize, PROT_READ, MAP_SHARED, fd, 0);
	if (smem == MAP_FAILED) {
		ksft_test_result_fail("mmap() failed\n");
		goto munmap;
	}

	/* Fault the page in. */
	tmp = *mem + *smem;
	asm volatile("" : "+r" (tmp));

	fn(mem, smem, hugetlbsize);
munmap:
	munmap(mem, hugetlbsize);
	if (smem != MAP_FAILED)
		munmap(smem, hugetlbsize);
close:
	close(fd);
}
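A note on the MFD_HUGE_SHIFT line above (not part of the patch): memfd_create() expects the requested huge page size encoded as log2(size) shifted by MFD_HUGE_SHIFT, and for a power-of-two size __builtin_ctzll() yields exactly that log2. A small sketch, assuming the uapi <linux/memfd.h> definitions: 2 MiB is 1 << 21, so the computed flag equals MFD_HUGE_2MB.

#include <assert.h>
#include <linux/memfd.h>

int main(void)
{
	unsigned long long hugetlbsize = 2ULL * 1024 * 1024;	/* 2 MiB */

	/* log2(2 MiB) = 21, shifted into the MFD_HUGE_* field. */
	assert((__builtin_ctzll(hugetlbsize) << MFD_HUGE_SHIFT) == MFD_HUGE_2MB);
	return 0;
}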
struct non_anon_test_case {
	const char *desc;
	non_anon_test_fn fn;
};

/*
 * Test cases that target any pages in private mappings that are not anonymous:
 * pages that may get shared via COW independent of fork(). This includes
 * the shared zeropage(s), pagecache pages, ...
 */
static const struct non_anon_test_case non_anon_test_cases[] = {
	/*
	 * Basic COW test without any GUP. If we fail to break COW, changes are
	 * visible via other private/shared mappings.
	 */
	{
		"Basic COW",
		test_cow,
	},
};

static void run_non_anon_test_case(struct non_anon_test_case const *test_case)
{
	int i;

	run_with_zeropage(test_case->fn, test_case->desc);
	run_with_memfd(test_case->fn, test_case->desc);
	run_with_tmpfile(test_case->fn, test_case->desc);
	if (thpsize)
		run_with_huge_zeropage(test_case->fn, test_case->desc);
	for (i = 0; i < nr_hugetlbsizes; i++)
		run_with_memfd_hugetlb(test_case->fn, test_case->desc,
				       hugetlbsizes[i]);
}

static void run_non_anon_test_cases(void)
{
	int i;

	ksft_print_msg("[RUN] Non-anonymous memory tests in private mappings\n");

	for (i = 0; i < ARRAY_SIZE(non_anon_test_cases); i++)
		run_non_anon_test_case(&non_anon_test_cases[i]);
}

static int tests_per_non_anon_test_case(void)
{
	int tests = 3 + nr_hugetlbsizes;

	if (thpsize)
		tests += 1;
	return tests;
}

int main(int argc, char **argv)
{
	int err;
@@ -1155,9 +1488,11 @@ int main(int argc, char **argv)
	pagesize = getpagesize();
	detect_thpsize();
	detect_hugetlbsizes();
	detect_huge_zeropage();

	ksft_print_header();
-	ksft_set_plan(ARRAY_SIZE(anon_test_cases) * tests_per_anon_test_case());
+	ksft_set_plan(ARRAY_SIZE(anon_test_cases) * tests_per_anon_test_case() +
+		      ARRAY_SIZE(non_anon_test_cases) * tests_per_non_anon_test_case());

	gup_fd = open("/sys/kernel/debug/gup_test", O_RDWR);
	pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
@@ -1165,6 +1500,7 @@ int main(int argc, char **argv)
	ksft_exit_fail_msg("opening pagemap failed\n");

	run_anon_test_cases();
	run_non_anon_test_cases();

	err = ksft_get_fail_cnt();
	if (err)
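One more illustrative aside (not part of the patch): pagemap_is_populated(), used in the huge zeropage test above, comes from the selftests' vm_util helpers and is not shown in this diff. A rough sketch of how such a check can be built on the pagemap file opened in main(), assuming the documented format (one 64-bit entry per page, bit 63 = present, bit 62 = swapped) and treating either state as populated; the function name is hypothetical.

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

static bool pagemap_entry_populated(int pagemap_fd, size_t pagesize, const void *addr)
{
	uint64_t entry = 0;
	off_t offset = ((uintptr_t)addr / pagesize) * sizeof(entry);

	if (pread(pagemap_fd, &entry, sizeof(entry), offset) != sizeof(entry))
		return false;
	/* Bit 63: page present in RAM; bit 62: page swapped out. */
	return entry & ((1ULL << 63) | (1ULL << 62));
}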