Merge tag 'idmapped-mounts-v5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/brauner/linux
Pull idmapped mounts from Christian Brauner:
"This introduces idmapped mounts which has been in the making for some
time. Simply put, different mounts can expose the same file or
directory with different ownership. This initial implementation comes
with ports for fat, ext4, and Christoph's port for xfs, with more
filesystems being actively worked on by independent people and
maintainers.
Idmapped mounts handle a wide range of long-standing use cases. Here
are just a few:
- Idmapped mounts make it possible to easily share files between
multiple users or multiple machines especially in complex
scenarios. For example, idmapped mounts will be used in the
implementation of portable home directories in
systemd-homed.service(8) where they allow users to move their home
directory to an external storage device and use it on multiple
computers where they are assigned different uids and gids. This
effectively makes it possible to assign random uids and gids at
login time.
- It is possible to share files from the host with unprivileged
containers without having to change ownership permanently through
chown(2).
- It is possible to idmap a container's rootfs without having to
mangle every file. For example, Chromebooks use it to share the
user's Download folder with their unprivileged containers in their
Linux subsystem.
- It is possible to share files between containers with
non-overlapping idmappings.
- Filesystems that lack a proper concept of ownership, such as fat, can
use idmapped mounts to implement discretionary access control (DAC)
permission checking.
- They allow users to efficiently change ownership on a per-mount
basis without having to (recursively) chown(2) all files. In
contrast to chown(2), changing ownership of large sets of files is
instantaneous with idmapped mounts. This is especially useful when
the ownership of an entire root filesystem of a virtual machine or
container is changed. With idmapped mounts a single mount_setattr()
syscall is sufficient to change the ownership of all files.
- Idmapped mounts always take the current ownership into account as
idmappings specify what a given uid or gid is supposed to be mapped
to. This contrasts with the chown(2) syscall which cannot by itself
take the current ownership of the files it changes into account. It
simply changes the ownership to the specified uid and gid. This is
especially problematic when recursively chown(2)ing a large set of
files, which is common in the aforementioned portable home directory
and container/VM scenarios.
- Idmapped mounts allow changing ownership locally, restricting it
to specific mounts, and temporarily, as the ownership changes only
apply as long as the mount exists.
Several userspace projects have either already put up patches and
pull-requests for this feature or will do so should you decide to pull
this:
- systemd: In a wide variety of scenarios but especially right away
in their implementation of portable home directories.
https://systemd.io/HOME_DIRECTORY/
- container runtimes: containerd, runC, LXD: to share data between
host and unprivileged containers, unprivileged and privileged
containers, etc. The pull request for idmapped mounts support in
containerd, the default Kubernetes runtime, has been up for quite
a while now: https://github.com/containerd/containerd/pull/4734
- The virtio-fs developers and several users have expressed interest
in using this feature with virtual machines once virtio-fs is
ported.
- ChromeOS: Sharing host-directories with unprivileged containers.
I've synced tightly with all those projects, and all of those listed
here have also expressed their need/desire for this feature on the
mailing list. For more info on how people use this there are a bunch
of talks about it too. Here are just two recent ones:
https://www.cncf.io/wp-content/uploads/2020/12/Rootless-Containers-in-Gitpod.pdf
https://fosdem.org/2021/schedule/event/containers_idmap/
This comes with an extensive xfstests suite covering both ext4 and
xfs:
https://git.kernel.org/brauner/xfstests-dev/h/idmapped_mounts
It covers truncation, creation, opening, xattrs, vfscaps, setid
execution, setgid inheritance and more both with idmapped and
non-idmapped mounts. It already helped to discover an unrelated xfs
setgid inheritance bug which has since been fixed in mainline. It will
be sent for inclusion with the xfstests project should you decide to
merge this.
In order to support per-mount idmappings vfsmounts are marked with
user namespaces. The idmapping of the user namespace will be used to
map the ids of vfs objects when they are accessed through that mount.
By default all vfsmounts are marked with the initial user namespace.
The initial user namespace is used to indicate that a mount is not
idmapped. All operations behave as before and this is verified in the
testsuite.
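To make the mapping step concrete, here is a conceptual sketch only (this is
not the kernel's uid_gid_map code; the struct and field names are made up for
illustration) of how a single idmapping extent translates an id. The
"1000:1001:1" mapping used in the example further down behaves this way: id
1000 comes out as 1001.

/* Conceptual illustration only; names are hypothetical, not kernel API. */
struct id_extent {
	unsigned int first;	/* first id covered by the extent */
	unsigned int lower;	/* id the first one maps to */
	unsigned int count;	/* number of consecutive ids covered */
};

/* Returns the mapped id, or (unsigned int)-1 if the id is not covered. */
static unsigned int map_id(const struct id_extent *e, unsigned int id)
{
	if (id >= e->first && id - e->first < e->count)
		return e->lower + (id - e->first);
	return (unsigned int)-1;
}

/* Example: the extent {1000, 1001, 1} maps id 1000 to 1001. */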
Based on prior discussions we want to attach the whole user namespace
and not just a dedicated idmapping struct. This allows us to reuse all
the helpers that already exist for dealing with idmappings instead of
introducing a whole new range of helpers. In addition, if we decide in
the future that we are confident enough to enable unprivileged users
to set up idmapped mounts, the permission checking can take into account
whether the caller is privileged in the user namespace the mount is
currently marked with.
The user namespace the mount will be marked with can be specified by
passing a file descriptor referring to the user namespace as an
argument to the new mount_setattr() syscall together with the new
MOUNT_ATTR_IDMAP flag. The system call follows the openat2() pattern
of extensibility.
The following conditions must be met in order to create an idmapped
mount:
- The caller must currently have the CAP_SYS_ADMIN capability in the
user namespace the underlying filesystem has been mounted in.
- The underlying filesystem must support idmapped mounts.
- The mount must not already be idmapped. This also implies that the
idmapping of a mount cannot be altered once it has been idmapped.
- The mount must be a detached/anonymous mount, i.e. it must have
been created by calling open_tree() with the OPEN_TREE_CLONE flag
and it must not already have been visible in the filesystem.
The last two points guarantee easier semantics for userspace and the
kernel and make the implementation significantly simpler.
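As a minimal userspace sketch of the steps above (assuming a 5.12 kernel whose
linux/mount.h provides struct mount_attr, and falling back to the asm-generic
syscall numbers where the libc does not yet ship wrappers; this is not the
mount-idmapped tool itself), the flow is: detach a copy of the source tree
with open_tree(), mark it with the idmapping of an existing user namespace via
mount_setattr(), and attach it with move_mount():

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/mount.h>	/* struct mount_attr, MOUNT_ATTR_IDMAP (5.12+) */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_open_tree
#define __NR_open_tree 428
#endif
#ifndef __NR_move_mount
#define __NR_move_mount 429
#endif
#ifndef __NR_mount_setattr
#define __NR_mount_setattr 442
#endif

int main(int argc, char *argv[])
{
	struct mount_attr attr = { .attr_set = MOUNT_ATTR_IDMAP };
	int fd_userns, fd_tree;

	if (argc != 4) {
		fprintf(stderr, "usage: %s <source> <target> /proc/<pid>/ns/user\n",
			argv[0]);
		return 1;
	}

	/* The idmapping is taken from an existing user namespace. */
	fd_userns = open(argv[3], O_RDONLY | O_CLOEXEC);
	if (fd_userns < 0) {
		perror("open userns");
		return 1;
	}
	attr.userns_fd = fd_userns;

	/* 1. Create a detached (anonymous) copy of the source mount. */
	fd_tree = syscall(__NR_open_tree, AT_FDCWD, argv[1],
			  OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
	if (fd_tree < 0) {
		perror("open_tree");
		return 1;
	}

	/* 2. Mark the detached mount with the user namespace's idmapping. */
	if (syscall(__NR_mount_setattr, fd_tree, "", AT_EMPTY_PATH,
		    &attr, sizeof(attr)) < 0) {
		perror("mount_setattr");
		return 1;
	}

	/* 3. Attach the now-idmapped mount at the target location. */
	if (syscall(__NR_move_mount, fd_tree, "", AT_FDCWD, argv[2],
		    MOVE_MOUNT_F_EMPTY_PATH) < 0) {
		perror("move_mount");
		return 1;
	}

	close(fd_tree);
	close(fd_userns);
	return 0;
}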
By default vfsmounts are marked with the initial user namespace and no
behavioral or performance changes are observed.
The manpage with a detailed description can be found here:
1d7b902e28
In order to support idmapped mounts, filesystems need to be changed
and mark themselves with the FS_ALLOW_IDMAP flag in fs_flags. The
patches to convert individual filesystems are not very large or
complicated overall, as can be seen from the included fat, ext4, and
xfs ports. Patches for other filesystems are actively worked on and
will be sent out separately. The xfstests suite can be used to verify
that a port has been done correctly.
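For illustration only (a hypothetical filesystem called "foofs"; real ports
also need to thread the mount's user namespace through their permission and
ownership helpers), the opt-in itself is a single flag on the filesystem's
file_system_type:

/* Hypothetical example; foofs and foofs_mount are illustrative names. */
static struct file_system_type foofs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "foofs",
	.mount		= foofs_mount,	/* the filesystem's existing mount callback */
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};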
The mount_setattr() syscall is motivated independently of the idmapped
mounts patches and has been around since July 2019. One of the most
valuable features of the new mount api is the ability to perform
mounts based on file descriptors only.
Together with the lookup restrictions available in the openat2()
RESOLVE_* flag namespace which we added in v5.6 this is the first time
we are close to hardened and race-free (e.g. symlinks) mounting and
path resolution.
While userspace has started porting to the new mount api to mount
proper filesystems and create new bind-mounts it is currently not
possible to change mount options of an already existing bind mount in
the new mount api since the mount_setattr() syscall is missing.
With the addition of the mount_setattr() syscall we remove this last
restriction and userspace can now fully port to the new mount api,
covering every use-case the old mount api could. We also add the
crucial ability to recursively change mount options for a whole mount
tree, both removing and adding mount options at the same time. This
syscall has been requested multiple times by various people and
projects.
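As a rough sketch of that part of the interface (again assuming 5.12 headers;
AT_RECURSIVE and the syscall number are defined as fallbacks in case the
toolchain headers predate them), a whole mount tree can be switched to
read-only and nosuid, while clearing noexec, in a single call:

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/mount.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_mount_setattr
#define __NR_mount_setattr 442
#endif
#ifndef AT_RECURSIVE
#define AT_RECURSIVE 0x8000	/* apply to the whole mount tree */
#endif

int main(int argc, char *argv[])
{
	struct mount_attr attr = {
		.attr_set = MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID,
		.attr_clr = MOUNT_ATTR_NOEXEC,
	};
	const char *path = argc > 1 ? argv[1] : "/mnt";

	/* One call changes the options for every mount under @path. */
	if (syscall(__NR_mount_setattr, AT_FDCWD, path, AT_RECURSIVE,
		    &attr, sizeof(attr)) < 0) {
		perror("mount_setattr");
		return 1;
	}
	return 0;
}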
There is a simple tool available at
https://github.com/brauner/mount-idmapped
that allows creating idmapped mounts so people can play with this
patch series. I'll add support to the regular mount binary in the
following weeks, should you decide to pull this.
Here's an example of a simple idmapped mount of another user's home
directory:
u1001@f2-vm:/$ sudo ./mount --idmap both:1000:1001:1 /home/ubuntu/ /mnt
u1001@f2-vm:/$ ls -al /home/ubuntu/
total 28
drwxr-xr-x 2 ubuntu ubuntu 4096 Oct 28 22:07 .
drwxr-xr-x 4 root root 4096 Oct 28 04:00 ..
-rw------- 1 ubuntu ubuntu 3154 Oct 28 22:12 .bash_history
-rw-r--r-- 1 ubuntu ubuntu 220 Feb 25 2020 .bash_logout
-rw-r--r-- 1 ubuntu ubuntu 3771 Feb 25 2020 .bashrc
-rw-r--r-- 1 ubuntu ubuntu 807 Feb 25 2020 .profile
-rw-r--r-- 1 ubuntu ubuntu 0 Oct 16 16:11 .sudo_as_admin_successful
-rw------- 1 ubuntu ubuntu 1144 Oct 28 00:43 .viminfo
u1001@f2-vm:/$ ls -al /mnt/
total 28
drwxr-xr-x 2 u1001 u1001 4096 Oct 28 22:07 .
drwxr-xr-x 29 root root 4096 Oct 28 22:01 ..
-rw------- 1 u1001 u1001 3154 Oct 28 22:12 .bash_history
-rw-r--r-- 1 u1001 u1001 220 Feb 25 2020 .bash_logout
-rw-r--r-- 1 u1001 u1001 3771 Feb 25 2020 .bashrc
-rw-r--r-- 1 u1001 u1001 807 Feb 25 2020 .profile
-rw-r--r-- 1 u1001 u1001 0 Oct 16 16:11 .sudo_as_admin_successful
-rw------- 1 u1001 u1001 1144 Oct 28 00:43 .viminfo
u1001@f2-vm:/$ touch /mnt/my-file
u1001@f2-vm:/$ setfacl -m u:1001:rwx /mnt/my-file
u1001@f2-vm:/$ sudo setcap -n 1001 cap_net_raw+ep /mnt/my-file
u1001@f2-vm:/$ ls -al /mnt/my-file
-rw-rwxr--+ 1 u1001 u1001 0 Oct 28 22:14 /mnt/my-file
u1001@f2-vm:/$ ls -al /home/ubuntu/my-file
-rw-rwxr--+ 1 ubuntu ubuntu 0 Oct 28 22:14 /home/ubuntu/my-file
u1001@f2-vm:/$ getfacl /mnt/my-file
getfacl: Removing leading '/' from absolute path names
# file: mnt/my-file
# owner: u1001
# group: u1001
user::rw-
user:u1001:rwx
group::rw-
mask::rwx
other::r--
u1001@f2-vm:/$ getfacl /home/ubuntu/my-file
getfacl: Removing leading '/' from absolute path names
# file: home/ubuntu/my-file
# owner: ubuntu
# group: ubuntu
user::rw-
user:ubuntu:rwx
group::rw-
mask::rwx
other::r--"
* tag 'idmapped-mounts-v5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/brauner/linux: (41 commits)
xfs: remove the possibly unused mp variable in xfs_file_compat_ioctl
xfs: support idmapped mounts
ext4: support idmapped mounts
fat: handle idmapped mounts
tests: add mount_setattr() selftests
fs: introduce MOUNT_ATTR_IDMAP
fs: add mount_setattr()
fs: add attr_flags_to_mnt_flags helper
fs: split out functions to hold writers
namespace: only take read lock in do_reconfigure_mnt()
mount: make {lock,unlock}_mount_hash() static
namespace: take lock_mount_hash() directly when changing flags
nfs: do not export idmapped mounts
overlayfs: do not mount on top of idmapped mounts
ecryptfs: do not mount on top of idmapped mounts
ima: handle idmapped mounts
apparmor: handle idmapped mounts
fs: make helpers idmap mount aware
exec: handle idmapped mounts
would_dump: handle idmapped mounts
...
// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/page_idle.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/fadvise.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/uio.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"

struct madvise_walk_private {
	struct mmu_gather *tlb;
	bool pageout;
};

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_lock for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_FREE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_WIPEONFORK:
		/* MADV_WIPEONFORK is only supported on anonymous memory. */
		if (vma->vm_file || vma->vm_flags & VM_SHARED) {
			error = -EINVAL;
			goto out;
		}
		new_flags |= VM_WIPEONFORK;
		break;
	case MADV_KEEPONFORK:
		new_flags &= ~VM_WIPEONFORK;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out_convert_errno;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out_convert_errno;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, start, 1);
		if (error)
			goto out_convert_errno;
	}

	if (end != vma->vm_end) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, end, 0);
		if (error)
			goto out_convert_errno;
	}

success:
	/*
	 * vm_flags is protected by the mmap_lock held in write mode.
	 */
	vma->vm_flags = new_flags;

out_convert_errno:
	/*
	 * madvise() returns EAGAIN if kernel resources, such as
	 * slab, are temporarily unavailable.
	 */
	if (error == -ENOMEM)
		error = -EAGAIN;
out:
	return error;
}

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t *orig_pte;
	struct vm_area_struct *vma = walk->private;
	unsigned long index;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;

		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
		pte_unmap_unlock(orig_pte, ptl);

		if (pte_present(pte) || pte_none(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
							vma, index, false);
		if (page)
			put_page(page);
	}

	return 0;
}

static const struct mm_walk_ops swapin_walk_ops = {
	.pmd_entry		= swapin_walk_pmd_entry,
};

static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
	pgoff_t end_index = linear_page_index(vma, end + PAGE_SIZE - 1);
	struct page *page;

	rcu_read_lock();
	xas_for_each(&xas, page, end_index) {
		swp_entry_t swap;

		if (!xa_is_value(page))
			continue;
		xas_pause(&xas);
		rcu_read_unlock();

		swap = radix_to_swp_entry(page);
		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
							NULL, 0, false);
		if (page)
			put_page(page);

		rcu_read_lock();
	}
	rcu_read_unlock();

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif		/* CONFIG_SWAP */

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	loff_t offset;

	*prev = vma;
#ifdef CONFIG_SWAP
	if (!file) {
		walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
		lru_add_drain(); /* Push any new pages onto the LRU now */
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		force_shm_swapin_readahead(vma, start, end,
					file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	/*
	 * Filesystem's fadvise may need to take various locks.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_lock.
	 */
	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */
	get_file(file);
	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	mmap_read_unlock(mm);
	vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
	fput(file);
	mmap_read_lock(mm);
	return 0;
}

static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	struct madvise_walk_private *private = walk->private;
	struct mmu_gather *tlb = private->tlb;
	bool pageout = private->pageout;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	pte_t *orig_pte, *pte, ptent;
	spinlock_t *ptl;
	struct page *page = NULL;
	LIST_HEAD(page_list);

	if (fatal_signal_pending(current))
		return -EINTR;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(*pmd)) {
		pmd_t orig_pmd;
		unsigned long next = pmd_addr_end(addr, end);

		tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
		ptl = pmd_trans_huge_lock(pmd, vma);
		if (!ptl)
			return 0;

		orig_pmd = *pmd;
		if (is_huge_zero_pmd(orig_pmd))
			goto huge_unlock;

		if (unlikely(!pmd_present(orig_pmd))) {
			VM_BUG_ON(thp_migration_supported() &&
					!is_pmd_migration_entry(orig_pmd));
			goto huge_unlock;
		}

		page = pmd_page(orig_pmd);

		/* Do not interfere with other mappings of this page */
		if (page_mapcount(page) != 1)
			goto huge_unlock;

		if (next - addr != HPAGE_PMD_SIZE) {
			int err;

			get_page(page);
			spin_unlock(ptl);
			lock_page(page);
			err = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (!err)
				goto regular_page;
			return 0;
		}

		if (pmd_young(orig_pmd)) {
			pmdp_invalidate(vma, addr, pmd);
			orig_pmd = pmd_mkold(orig_pmd);

			set_pmd_at(mm, addr, pmd, orig_pmd);
			tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
		}

		ClearPageReferenced(page);
		test_and_clear_page_young(page);
		if (pageout) {
			if (!isolate_lru_page(page)) {
				if (PageUnevictable(page))
					putback_lru_page(page);
				else
					list_add(&page->lru, &page_list);
			}
		} else
			deactivate_page(page);
huge_unlock:
		spin_unlock(ptl);
		if (pageout)
			reclaim_pages(&page_list);
		return 0;
	}

regular_page:
	if (pmd_trans_unstable(pmd))
		return 0;
#endif
	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/*
		 * Creating a THP page is expensive so split it only if we
		 * are sure it's worth. Split it if we are only owner.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				break;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				break;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				pte_offset_map_lock(mm, pmd, addr, &ptl);
				break;
			}
			unlock_page(page);
			put_page(page);
			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		/* Do not interfere with other mappings of this page */
		if (page_mapcount(page) != 1)
			continue;

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (pte_young(ptent)) {
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			ptent = pte_mkold(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}

		/*
		 * We are deactivating a page for accelerating reclaiming.
		 * The VM can't reclaim the page unless we clear PG_young.
		 * As a side effect, this confuses idle-page tracking,
		 * because it will miss the recent referenced history.
		 */
		ClearPageReferenced(page);
		test_and_clear_page_young(page);
		if (pageout) {
			if (!isolate_lru_page(page)) {
				if (PageUnevictable(page))
					putback_lru_page(page);
				else
					list_add(&page->lru, &page_list);
			}
		} else
			deactivate_page(page);
	}

	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	if (pageout)
		reclaim_pages(&page_list);
	cond_resched();

	return 0;
}

static const struct mm_walk_ops cold_walk_ops = {
	.pmd_entry = madvise_cold_or_pageout_pte_range,
};

static void madvise_cold_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct madvise_walk_private walk_private = {
		.pageout = false,
		.tlb = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
}

static long madvise_cold(struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb);

	return 0;
}

static void madvise_pageout_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct madvise_walk_private walk_private = {
		.pageout = true,
		.tlb = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
}

static inline bool can_do_pageout(struct vm_area_struct *vma)
{
	if (vma_is_anonymous(vma))
		return true;
	if (!vma->vm_file)
		return false;
	/*
	 * paging out pagecache only for non-anonymous mappings that correspond
	 * to the files the calling process could (if tried) open for writing;
	 * otherwise we'd be including shared non-exclusive mappings, which
	 * opens a side channel.
	 */
	return inode_owner_or_capable(&init_user_ns,
				      file_inode(vma->vm_file)) ||
	       file_permission(vma->vm_file, MAY_WRITE) == 0;
}

static long madvise_pageout(struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	if (!can_do_pageout(vma))
		return 0;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb);

	return 0;
}

static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte, *pte, ptent;
	struct page *page;
	int nr_swap = 0;
	unsigned long next;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			goto next;

	if (pmd_trans_unstable(pmd))
		return 0;

	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte has a swp_entry, just clear the page table to
		 * prevent swap-in, which is more expensive than
		 * (page allocation + zeroing).
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (non_swap_entry(entry))
				continue;
			nr_swap--;
			free_swap_and_cache(entry);
			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			continue;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/*
		 * If the pmd isn't transhuge but the page is THP and
		 * is owned by only this process, split it and
		 * deactivate all pages.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				goto out;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				goto out;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				pte_offset_map_lock(mm, pmd, addr, &ptl);
				goto out;
			}
			unlock_page(page);
			put_page(page);
			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (PageSwapCache(page) || PageDirty(page)) {
			if (!trylock_page(page))
				continue;
			/*
			 * If the page is shared with others, we can't clear
			 * PG_dirty of the page.
			 */
			if (page_mapcount(page) != 1) {
				unlock_page(page);
				continue;
			}

			if (PageSwapCache(page) && !try_to_free_swap(page)) {
				unlock_page(page);
				continue;
			}

			ClearPageDirty(page);
			unlock_page(page);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some architectures (e.g. PPC) don't update the TLB
			 * with set_pte_at and tlb_remove_tlb_entry, so for
			 * portability, remap the pte as old|clean after
			 * clearing it.
			 */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);

			ptent = pte_mkold(ptent);
			ptent = pte_mkclean(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}
		mark_page_lazyfree(page);
	}
out:
	if (nr_swap) {
		if (current->mm == mm)
			sync_mm_rss(mm);

		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
next:
	return 0;
}

static const struct mm_walk_ops madvise_free_walk_ops = {
	.pmd_entry		= madvise_free_pte_range,
};

static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	/* MADV_FREE works for only anon vma at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	range.start = max(vma->vm_start, start_addr);
	if (range.start >= vma->vm_end)
		return -EINVAL;
	range.end = min(vma->vm_end, end_addr);
	if (range.end <= vma->vm_start)
		return -EINVAL;
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
				range.start, range.end);

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(&range);
	tlb_start_vma(&tlb, vma);
	walk_page_range(vma->vm_mm, range.start, range.end,
			&madvise_free_walk_ops, &tlb);
	tlb_end_vma(&tlb, vma);
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb);

	return 0;
}

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
{
	zap_page_range(vma, start, end - start);
	return 0;
}

static long madvise_dontneed_free(struct vm_area_struct *vma,
				  struct vm_area_struct **prev,
				  unsigned long start, unsigned long end,
				  int behavior)
{
	struct mm_struct *mm = vma->vm_mm;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	if (!userfaultfd_remove(vma, start, end)) {
		*prev = NULL; /* mmap_lock has been dropped, prev is stale */

		mmap_read_lock(mm);
		vma = find_vma(mm, start);
		if (!vma)
			return -ENOMEM;
		if (start < vma->vm_start) {
			/*
			 * This "vma" under revalidation is the one
			 * with the lowest vma->vm_start where start
			 * is also < vma->vm_end. If start <
			 * vma->vm_start it means a hole materialized
			 * in the user address space within the
			 * virtual range passed to MADV_DONTNEED
			 * or MADV_FREE.
			 */
			return -ENOMEM;
		}
		if (!can_madv_lru_vma(vma))
			return -EINVAL;
		if (end > vma->vm_end) {
			/*
			 * Don't fail if end > vma->vm_end. If the old
			 * vma was split while the mmap_lock was
			 * released, the effect of the concurrent
			 * operation may not cause madvise() to
			 * have an undefined result. There may be an
			 * adjacent next vma that we'll walk
			 * next. userfaultfd_remove() will generate an
			 * UFFD_EVENT_REMOVE repetition on the
			 * end-vma->vm_end range, but the manager can
			 * handle a repetition fine.
			 */
			end = vma->vm_end;
		}
		VM_WARN_ON(start >= end);
	}

	if (behavior == MADV_DONTNEED)
		return madvise_dontneed_single_vma(vma, start, end);
	else if (behavior == MADV_FREE)
		return madvise_free_single_vma(vma, start, end);
	else
		return -EINVAL;
}

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;
	struct mm_struct *mm = vma->vm_mm;

	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
		return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_lock.
	 */
	get_file(f);
	if (userfaultfd_remove(vma, start, end)) {
		/* mmap_lock was not released by userfaultfd_remove() */
		mmap_read_unlock(mm);
	}
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	mmap_read_lock(mm);
	return error;
}

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_inject_error(int behavior,
		unsigned long start, unsigned long end)
{
	unsigned long size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	for (; start < end; start += size) {
		unsigned long pfn;
		struct page *page;
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &page);
		if (ret != 1)
			return ret;
		pfn = page_to_pfn(page);

		/*
		 * When soft offlining hugepages, after migrating the page
		 * we dissolve it, therefore in the second loop "page" will
		 * no longer be a compound page.
		 */
		size = page_size(compound_head(page));

		if (behavior == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
				 pfn, start);
			ret = soft_offline_page(pfn, MF_COUNT_INCREASED);
		} else {
			pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
				 pfn, start);
			ret = memory_failure(pfn, MF_COUNT_INCREASED);
		}

		if (ret)
			return ret;
	}

	return 0;
}
#endif

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_COLD:
		return madvise_cold(vma, prev, start, end);
	case MADV_PAGEOUT:
		return madvise_pageout(vma, prev, start, end);
	case MADV_FREE:
	case MADV_DONTNEED:
		return madvise_dontneed_free(vma, prev, start, end, behavior);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
	case MADV_COLD:
	case MADV_PAGEOUT:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
	case MADV_WIPEONFORK:
	case MADV_KEEPONFORK:
#ifdef CONFIG_MEMORY_FAILURE
	case MADV_SOFT_OFFLINE:
	case MADV_HWPOISON:
#endif
		return true;

	default:
		return false;
	}
}

static bool
process_madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_COLD:
	case MADV_PAGEOUT:
		return true;
	default:
		return false;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the appli-
 *		cation will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_WIPEONFORK - present the child process with zero-filled memory in this
 *		range after a fork.
 *  MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK
 *  MADV_HWPOISON - trigger memory error handler as if the given memory range
 *		were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE- cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *  MADV_COLD - the application is not expected to use this memory soon,
 *		deactivate pages in this range so that they can be reclaimed
 *		easily if memory pressure happens.
 *  MADV_PAGEOUT - the application is not expected to use this memory soon,
 *		page out the pages in this range immediately.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages,
 *		or the specified address range includes file, Huge TLB,
 *		MAP_SHARED or VMPFNMAP range.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
	struct blk_plug plug;

	start = untagged_addr(start);

	if (!madvise_behavior_valid(behavior))
		return error;

	if (!PAGE_ALIGNED(start))
		return error;
	len = PAGE_ALIGN(len_in);

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return error;

	end = start + len;
	if (end < start)
		return error;

	error = 0;
	if (end == start)
		return error;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_inject_error(behavior, start, start + len_in);
#endif

	write = madvise_need_mmap_write(behavior);
	if (write) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
	} else {
		mmap_read_lock(mm);
	}

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	blk_start_plug(&plug);
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_lock */
			vma = find_vma(mm, start);
	}
out:
	blk_finish_plug(&plug);
	if (write)
		mmap_write_unlock(mm);
	else
		mmap_read_unlock(mm);

	return error;
}

SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	return do_madvise(current->mm, start, len_in, behavior);
}

SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
		size_t, vlen, int, behavior, unsigned int, flags)
{
	ssize_t ret;
	struct iovec iovstack[UIO_FASTIOV], iovec;
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	struct pid *pid;
	struct task_struct *task;
	struct mm_struct *mm;
	size_t total_len;
	unsigned int f_flags;

	if (flags != 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = import_iovec(READ, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
	if (ret < 0)
		goto out;

	pid = pidfd_get_pid(pidfd, &f_flags);
	if (IS_ERR(pid)) {
		ret = PTR_ERR(pid);
		goto free_iov;
	}

	task = get_pid_task(pid, PIDTYPE_PID);
	if (!task) {
		ret = -ESRCH;
		goto put_pid;
	}

	if (!process_madvise_behavior_valid(behavior)) {
		ret = -EINVAL;
		goto release_task;
	}

	mm = mm_access(task, PTRACE_MODE_ATTACH_FSCREDS);
	if (IS_ERR_OR_NULL(mm)) {
		ret = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		goto release_task;
	}

	total_len = iov_iter_count(&iter);

	while (iov_iter_count(&iter)) {
		iovec = iov_iter_iovec(&iter);
		ret = do_madvise(mm, (unsigned long)iovec.iov_base,
					iovec.iov_len, behavior);
		if (ret < 0)
			break;
		iov_iter_advance(&iter, iovec.iov_len);
	}

	if (ret == 0)
		ret = total_len - iov_iter_count(&iter);

	mmput(mm);
release_task:
	put_task_struct(task);
put_pid:
	put_pid(pid);
free_iov:
	kfree(iov);
out:
	return ret;
}