Mirror of https://github.com/torvalds/linux.git, synced 2024-12-11 13:41:55 +00:00
8a144612eb

We expect a file page access after dropping caches to be a major fault, but sometimes it is still a minor fault. That is because a file page cannot be dropped while it sits in a per-cpu pagevec. Drain all pages from the per-cpu pagevecs to the LRU lists before trying to drop caches.

Link: https://lkml.kernel.org/r/20230630092203.16080-1-andrew.yang@mediatek.com
Signed-off-by: Andrew Yang <andrew.yang@mediatek.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Matthias Brugger <matthias.bgg@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
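For context, a minimal userspace sketch (not part of this file) of the interface the code below implements: writing "1" to /proc/sys/vm/drop_caches drops clean pagecache, "2" drops slab caches, "3" drops both, and "4" silences the informational log line, matching the bit tests in drop_caches_sysctl_handler(). Running it requires root, and syncing first helps since dirty pages are not dropped.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* 1 = pagecache, 2 = slab, 3 = both, 4 = silence the log message */
	int fd = open("/proc/sys/vm/drop_caches", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "3", 1) != 1) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}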
80 lines
1.9 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * Implement the manual drop-all-pagecache function
 */

#include <linux/pagemap.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/sysctl.h>
#include <linux/gfp.h>
#include <linux/swap.h>
#include "internal.h"

/* A global variable is a bit ugly, but it keeps the code simple */
int sysctl_drop_caches;

static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
	struct inode *inode, *toput_inode = NULL;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		/*
		 * We must skip inodes in unusual state. We may also skip
		 * inodes without pages but we deliberately won't in case
		 * we need to reschedule to avoid softlockups.
		 */
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping_empty(inode->i_mapping) && !need_resched())) {
			spin_unlock(&inode->i_lock);
			continue;
		}
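		/*
		 * Pin the inode so it cannot be freed once i_lock and
		 * s_inode_list_lock are dropped below.
		 */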
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

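		/*
		 * Drop clean, unmapped pagecache for this inode; dirty,
		 * locked and mapped pages are left alone.
		 */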
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
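		/*
		 * Put the previous inode now that no locks are held, and
		 * keep a reference on this one so the s_inodes iteration
		 * stays valid once s_inode_list_lock is retaken.
		 */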
		iput(toput_inode);
		toput_inode = inode;

		cond_resched();
		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
	iput(toput_inode);
}

int drop_caches_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (ret)
		return ret;
	if (write) {
		static int stfu;

		if (sysctl_drop_caches & 1) {
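			/*
			 * Drain the per-cpu pagevecs first: pages sitting
			 * there cannot be dropped and would otherwise be
			 * found again as minor faults.
			 */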
			lru_add_drain_all();
			iterate_supers(drop_pagecache_sb, NULL);
			count_vm_event(DROP_PAGECACHE);
		}
		if (sysctl_drop_caches & 2) {
			drop_slab();
			count_vm_event(DROP_SLAB);
		}
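		/*
		 * Log who dropped caches unless a previous write included
		 * bit 4, which silences this message from then on.
		 */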
		if (!stfu) {
			pr_info("%s (%d): drop_caches: %d\n",
				current->comm, task_pid_nr(current),
				sysctl_drop_caches);
		}
		stfu |= sysctl_drop_caches & 4;
	}
	return 0;
}