// SPDX-License-Identifier: GPL-2.0-or-later
/* CacheFiles path walking and related routines
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/fs.h>
#include <linux/namei.h>
#include "internal.h"

/*
 * Mark the backing file as being a cache file if it's not already in use. The
 * mark tells the culling request command that it's not allowed to cull the
 * file or directory. The caller must hold the inode lock.
 */
static bool __cachefiles_mark_inode_in_use(struct cachefiles_object *object,
					   struct inode *inode)
{
	bool can_use = false;

	if (!(inode->i_flags & S_KERNEL_FILE)) {
		inode->i_flags |= S_KERNEL_FILE;
		trace_cachefiles_mark_active(object, inode);
		can_use = true;
	} else {
		trace_cachefiles_mark_failed(object, inode);
	}

	return can_use;
}

static bool cachefiles_mark_inode_in_use(struct cachefiles_object *object,
					 struct inode *inode)
{
	bool can_use;

	inode_lock(inode);
	can_use = __cachefiles_mark_inode_in_use(object, inode);
	inode_unlock(inode);
	return can_use;
}

/*
 * Unmark a backing inode. The caller must hold the inode lock.
 */
static void __cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
					     struct inode *inode)
{
	inode->i_flags &= ~S_KERNEL_FILE;
	trace_cachefiles_mark_inactive(object, inode);
}

static void cachefiles_do_unmark_inode_in_use(struct cachefiles_object *object,
					      struct inode *inode)
{
	inode_lock(inode);
	__cachefiles_unmark_inode_in_use(object, inode);
	inode_unlock(inode);
}

/*
 * Unmark a backing inode and tell cachefilesd that there's something that can
 * be culled.
 */
void cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
				    struct file *file)
{
	struct cachefiles_cache *cache = object->volume->cache;
	struct inode *inode = file_inode(file);

	cachefiles_do_unmark_inode_in_use(object, inode);
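
	/* A file still backed by a tmpfile was never linked into the cache
	 * tree, so there's presumably nothing there for the daemon to cull;
	 * only count a linked-in file as released space. */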
	if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
		atomic_long_add(inode->i_blocks, &cache->b_released);
		if (atomic_inc_return(&cache->f_released))
			cachefiles_state_changed(cache);
	}
}

/*
 * Get a subdirectory, creating it if it doesn't already exist.
 */
struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
					struct dentry *dir,
					const char *dirname,
					bool *_is_new)
{
	struct dentry *subdir;
	struct path path;
	int ret;

	_enter(",,%s", dirname);

	/* search the current directory for the element name */
	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

retry:
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		subdir = lookup_one_len(dirname, dir, strlen(dirname));
	else
		subdir = ERR_PTR(ret);
	trace_cachefiles_lookup(NULL, dir, subdir);
	if (IS_ERR(subdir)) {
		trace_cachefiles_vfs_error(NULL, d_backing_inode(dir),
					   PTR_ERR(subdir),
					   cachefiles_trace_lookup_error);
		if (PTR_ERR(subdir) == -ENOMEM)
			goto nomem_d_alloc;
		goto lookup_error;
	}

	_debug("subdir -> %pd %s",
	       subdir, d_backing_inode(subdir) ? "positive" : "negative");

	/* we need to create the subdir if it doesn't exist yet */
	if (d_is_negative(subdir)) {
		ret = cachefiles_has_space(cache, 1, 0,
					   cachefiles_has_space_for_create);
		if (ret < 0)
			goto mkdir_error;

		_debug("attempt mkdir");

		path.mnt = cache->mnt;
		path.dentry = dir;
		ret = security_path_mkdir(&path, subdir, 0700);
		if (ret < 0)
			goto mkdir_error;
		ret = cachefiles_inject_write_error();
		if (ret == 0)
			ret = vfs_mkdir(&nop_mnt_idmap, d_inode(dir), subdir, 0700);
		if (ret < 0) {
			trace_cachefiles_vfs_error(NULL, d_inode(dir), ret,
						   cachefiles_trace_mkdir_error);
			goto mkdir_error;
		}
		trace_cachefiles_mkdir(dir, subdir);
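
		/* The backing filesystem may have left the new dentry
		 * unhashed (e.g. instantiated a different one); if so, drop
		 * ours and repeat the lookup to pick up the live dentry. */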
		if (unlikely(d_unhashed(subdir))) {
			cachefiles_put_directory(subdir);
			goto retry;
		}
		ASSERT(d_backing_inode(subdir));

		_debug("mkdir -> %pd{ino=%lu}",
		       subdir, d_backing_inode(subdir)->i_ino);
		if (_is_new)
			*_is_new = true;
	}
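
	/* Take the subdir's own lock before dropping the parent's so that
	 * there's no window in which the new subdir could be removed. */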
	/* Tell rmdir() it's not allowed to delete the subdir */
	inode_lock(d_inode(subdir));
	inode_unlock(d_inode(dir));

	if (!__cachefiles_mark_inode_in_use(NULL, d_inode(subdir))) {
		pr_notice("cachefiles: Inode already in use: %pd (B=%lx)\n",
			  subdir, d_inode(subdir)->i_ino);
		goto mark_error;
	}

	inode_unlock(d_inode(subdir));

	/* we need to make sure the subdir is a directory */
	ASSERT(d_backing_inode(subdir));

	if (!d_can_lookup(subdir)) {
		pr_err("%s is not a directory\n", dirname);
		ret = -EIO;
		goto check_error;
	}
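
	/* The cache needs xattr support and the full set of directory
	 * operations from the backing filesystem. */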
	ret = -EPERM;
	if (!(d_backing_inode(subdir)->i_opflags & IOP_XATTR) ||
	    !d_backing_inode(subdir)->i_op->lookup ||
	    !d_backing_inode(subdir)->i_op->mkdir ||
	    !d_backing_inode(subdir)->i_op->rename ||
	    !d_backing_inode(subdir)->i_op->rmdir ||
	    !d_backing_inode(subdir)->i_op->unlink)
		goto check_error;

	_leave(" = [%lu]", d_backing_inode(subdir)->i_ino);
	return subdir;

check_error:
	cachefiles_put_directory(subdir);
	_leave(" = %d [check]", ret);
	return ERR_PTR(ret);

mark_error:
	inode_unlock(d_inode(subdir));
	dput(subdir);
	return ERR_PTR(-EBUSY);

mkdir_error:
	inode_unlock(d_inode(dir));
	dput(subdir);
	pr_err("mkdir %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

lookup_error:
	inode_unlock(d_inode(dir));
	ret = PTR_ERR(subdir);
	pr_err("Lookup %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

nomem_d_alloc:
	inode_unlock(d_inode(dir));
	_leave(" = -ENOMEM");
	return ERR_PTR(-ENOMEM);
}

/*
 * Put a subdirectory.
 */
void cachefiles_put_directory(struct dentry *dir)
{
	if (dir) {
		cachefiles_do_unmark_inode_in_use(NULL, d_inode(dir));
		dput(dir);
	}
}

/*
 * Remove a regular file from the cache.
 */
static int cachefiles_unlink(struct cachefiles_cache *cache,
			     struct cachefiles_object *object,
			     struct dentry *dir, struct dentry *dentry,
			     enum fscache_why_object_killed why)
{
	struct path path = {
		.mnt = cache->mnt,
		.dentry = dir,
	};
	int ret;

	trace_cachefiles_unlink(object, d_inode(dentry)->i_ino, why);
	ret = security_path_unlink(&path, dentry);
	if (ret < 0) {
		cachefiles_io_error(cache, "Unlink security error");
		return ret;
	}

	ret = cachefiles_inject_remove_error();
	if (ret == 0) {
		ret = vfs_unlink(&nop_mnt_idmap, d_backing_inode(dir), dentry, NULL);
		if (ret == -EIO)
			cachefiles_io_error(cache, "Unlink failed");
	}
	if (ret != 0)
		trace_cachefiles_vfs_error(object, d_backing_inode(dir), ret,
					   cachefiles_trace_unlink_error);
	return ret;
}

/*
 * Delete an object representation from the cache
 * - File backed objects are unlinked
 * - Directory backed objects are stuffed into the graveyard for userspace to
 *   delete
 */
int cachefiles_bury_object(struct cachefiles_cache *cache,
			   struct cachefiles_object *object,
			   struct dentry *dir,
			   struct dentry *rep,
			   enum fscache_why_object_killed why)
{
	struct dentry *grave, *trap;
	struct path path, path_to_graveyard;
	char nbuffer[8 + 8 + 1];
	int ret;

	_enter(",'%pd','%pd'", dir, rep);

	if (rep->d_parent != dir) {
		inode_unlock(d_inode(dir));
		_leave(" = -ESTALE");
		return -ESTALE;
	}

	/* non-directories can just be unlinked */
	if (!d_is_dir(rep)) {
		dget(rep); /* Stop the dentry being negated if it's only pinned
			    * by a file struct.
			    */
		ret = cachefiles_unlink(cache, object, dir, rep, why);
		dput(rep);

		inode_unlock(d_inode(dir));
		_leave(" = %d", ret);
		return ret;
	}

	/* directories have to be moved to the graveyard */
	_debug("move stale object to graveyard");
	inode_unlock(d_inode(dir));

try_again:
	/* first step is to make up a grave dentry in the graveyard */
	sprintf(nbuffer, "%08x%08x",
		(uint32_t) ktime_get_real_seconds(),
		(uint32_t) atomic_inc_return(&cache->gravecounter));

	/* do the multiway lock magic */
	trap = lock_rename(cache->graveyard, dir);
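
	/* lock_rename() hands back the common ancestor if one of the two
	 * directories encloses the other; renaming onto that dentry would
	 * create a loop, hence the trap checks below. */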

	/* do some checks before getting the grave dentry */
	if (rep->d_parent != dir || IS_DEADDIR(d_inode(rep))) {
		/* the entry was probably culled when we dropped the parent dir
		 * lock */
		unlock_rename(cache->graveyard, dir);
		_leave(" = 0 [culled?]");
		return 0;
	}

	if (!d_can_lookup(cache->graveyard)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Graveyard no longer a directory");
		return -EIO;
	}

	if (trap == rep) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	if (d_mountpoint(rep)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Mountpoint in cache");
		return -EIO;
	}

	grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer));
	if (IS_ERR(grave)) {
		unlock_rename(cache->graveyard, dir);
		trace_cachefiles_vfs_error(object, d_inode(cache->graveyard),
					   PTR_ERR(grave),
					   cachefiles_trace_lookup_error);

		if (PTR_ERR(grave) == -ENOMEM) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		cachefiles_io_error(cache, "Lookup error %ld", PTR_ERR(grave));
		return -EIO;
	}

	if (d_is_positive(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		grave = NULL;
		cond_resched();
		goto try_again;
	}

	if (d_mountpoint(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "Mountpoint in graveyard");
		return -EIO;
	}

	/* target should not be an ancestor of source */
	if (trap == grave) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	/* attempt the rename */
	path.mnt = cache->mnt;
	path.dentry = dir;
	path_to_graveyard.mnt = cache->mnt;
	path_to_graveyard.dentry = cache->graveyard;
	ret = security_path_rename(&path, rep, &path_to_graveyard, grave, 0);
	if (ret < 0) {
		cachefiles_io_error(cache, "Rename security error %d", ret);
	} else {
		struct renamedata rd = {
			.old_mnt_idmap	= &nop_mnt_idmap,
			.old_dir	= d_inode(dir),
			.old_dentry	= rep,
			.new_mnt_idmap	= &nop_mnt_idmap,
			.new_dir	= d_inode(cache->graveyard),
			.new_dentry	= grave,
		};
		trace_cachefiles_rename(object, d_inode(rep)->i_ino, why);
		ret = cachefiles_inject_read_error();
		if (ret == 0)
			ret = vfs_rename(&rd);
		if (ret != 0)
			trace_cachefiles_vfs_error(object, d_inode(dir), ret,
						   cachefiles_trace_rename_error);
		if (ret != 0 && ret != -ENOMEM)
			cachefiles_io_error(cache,
					    "Rename failed with error %d", ret);
	}

	__cachefiles_unmark_inode_in_use(object, d_inode(rep));
	unlock_rename(cache->graveyard, dir);
	dput(grave);
	_leave(" = 0");
	return 0;
}
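
/*
 * Note that objects are distributed over the volume's fanout subdirectories,
 * indexed by the bottom byte of the cookie's key hash.
 */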

/*
 * Delete a cache file.
 */
int cachefiles_delete_object(struct cachefiles_object *object,
			     enum fscache_why_object_killed why)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry = object->file->f_path.dentry;
	struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
	int ret;

	_enter(",OBJ%x{%pD}", object->debug_id, object->file);

	/* Stop the dentry being negated if it's only pinned by a file struct. */
	dget(dentry);

	inode_lock_nested(d_backing_inode(fan), I_MUTEX_PARENT);
	ret = cachefiles_unlink(volume->cache, object, fan, dentry, why);
	inode_unlock(d_backing_inode(fan));
	dput(dentry);
	return ret;
}

/*
 * Create a temporary file and leave it unattached and un-xattr'd until the
 * time comes to discard the object from memory.
 */
struct file *cachefiles_create_tmpfile(struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct cachefiles_cache *cache = volume->cache;
	const struct cred *saved_cred;
	struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
	struct file *file;
	const struct path parentpath = { .mnt = cache->mnt, .dentry = fan };
	uint64_t ni_size;
	long ret;

	cachefiles_begin_secure(cache, &saved_cred);
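
	/* Create the file as an anonymous tmpfile in the fanout directory.
	 * It only gets a name there if/when cachefiles_commit_tmpfile()
	 * links it in; O_DIRECT is used as the cache does direct I/O. */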
	ret = cachefiles_inject_write_error();
	if (ret == 0) {
		file = kernel_tmpfile_open(&nop_mnt_idmap, &parentpath,
					   S_IFREG | 0600,
					   O_RDWR | O_LARGEFILE | O_DIRECT,
					   cache->cache_cred);
		ret = PTR_ERR_OR_ZERO(file);
	}
	if (ret) {
		trace_cachefiles_vfs_error(object, d_inode(fan), ret,
					   cachefiles_trace_tmpfile_error);
		if (ret == -EIO)
			cachefiles_io_error_obj(object, "Failed to create tmpfile");
		goto err;
	}

	trace_cachefiles_tmpfile(object, file_inode(file));

	/* This is a newly created file with no other possible user */
	if (!cachefiles_mark_inode_in_use(object, file_inode(file)))
		WARN_ON(1);
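
	/* In on-demand mode, tell the user daemon about the new object so
	 * that it can be handed an anonymous fd through which to fill in the
	 * data; in the original mode this is a no-op. */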
	ret = cachefiles_ondemand_init_object(object);
	if (ret < 0)
		goto err_unuse;

	ni_size = object->cookie->object_size;
	ni_size = round_up(ni_size, CACHEFILES_DIO_BLOCK_SIZE);

	if (ni_size > 0) {
		trace_cachefiles_trunc(object, file_inode(file), 0, ni_size,
				       cachefiles_trunc_expand_tmpfile);
		ret = cachefiles_inject_write_error();
		if (ret == 0)
			ret = vfs_truncate(&file->f_path, ni_size);
		if (ret < 0) {
			trace_cachefiles_vfs_error(
				object, file_inode(file), ret,
				cachefiles_trace_trunc_error);
			goto err_unuse;
		}
	}

	ret = -EINVAL;
	if (unlikely(!file->f_op->read_iter) ||
	    unlikely(!file->f_op->write_iter)) {
		pr_notice("Cache does not support read_iter and write_iter\n");
		goto err_unuse;
	}
out:
	cachefiles_end_secure(cache, saved_cred);
	return file;

err_unuse:
	cachefiles_do_unmark_inode_in_use(object, file_inode(file));
	fput(file);
err:
	file = ERR_PTR(ret);
	goto out;
}

/*
 * Create a new file.
 */
static bool cachefiles_create_file(struct cachefiles_object *object)
{
	struct file *file;
	int ret;

	ret = cachefiles_has_space(object->volume->cache, 1, 0,
				   cachefiles_has_space_for_create);
	if (ret < 0)
		return false;

	file = cachefiles_create_tmpfile(object);
	if (IS_ERR(file))
		return false;

	set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &object->cookie->flags);
	set_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
	_debug("create -> %pD{ino=%lu}", file, file_inode(file)->i_ino);
	object->file = file;
	return true;
}

/*
 * Open an existing file, checking its attributes and replacing it if it is
 * stale.
 */
static bool cachefiles_open_file(struct cachefiles_object *object,
				 struct dentry *dentry)
{
	struct cachefiles_cache *cache = object->volume->cache;
	struct file *file;
	struct path path;
	int ret;

	_enter("%pd", dentry);

	if (!cachefiles_mark_inode_in_use(object, d_inode(dentry))) {
		pr_notice("cachefiles: Inode already in use: %pd (B=%lx)\n",
			  dentry, d_inode(dentry)->i_ino);
		return false;
	}

	/* We need to open a file interface onto a data file now as we can't do
	 * it on demand because writeback called from do_exit() sees
	 * current->fs == NULL - which breaks d_path() called from ext4 open.
	 */
	path.mnt = cache->mnt;
	path.dentry = dentry;
	file = kernel_file_open(&path, O_RDWR | O_LARGEFILE | O_DIRECT,
				d_backing_inode(dentry), cache->cache_cred);
	if (IS_ERR(file)) {
		trace_cachefiles_vfs_error(object, d_backing_inode(dentry),
					   PTR_ERR(file),
					   cachefiles_trace_open_error);
		goto error;
	}

	if (unlikely(!file->f_op->read_iter) ||
	    unlikely(!file->f_op->write_iter)) {
		pr_notice("Cache does not support read_iter and write_iter\n");
		goto error_fput;
	}
	_debug("file -> %pd positive", dentry);

	ret = cachefiles_ondemand_init_object(object);
	if (ret < 0)
		goto error_fput;

	ret = cachefiles_check_auxdata(object, file);
	if (ret < 0)
		goto check_failed;
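
	/* There's data in the cache already, so cancel fscache's "no data to
	 * read" optimisation for this cookie. */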
	clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &object->cookie->flags);

	object->file = file;

	/* Always update the atime on an object we've just looked up (this is
	 * used to keep track of culling, and atimes are only updated by read,
	 * write and readdir but not lookup or open).
	 */
	touch_atime(&file->f_path);
	dput(dentry);
	return true;
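
/* The coherency data didn't check out.  If the file was merely stale
 * (-ESTALE), scrap it and start over with a fresh tmpfile.
 */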
check_failed:
	fscache_cookie_lookup_negative(object->cookie);
	cachefiles_unmark_inode_in_use(object, file);
	fput(file);
	dput(dentry);
	if (ret == -ESTALE)
		return cachefiles_create_file(object);
	return false;

error_fput:
	fput(file);
error:
	cachefiles_do_unmark_inode_in_use(object, d_inode(dentry));
	dput(dentry);
	return false;
}

/*
 * Walk from the volume to an object's backing file in the cache, opening the
 * file if it exists or falling back to a fresh tmpfile if it doesn't.
 */
bool cachefiles_look_up_object(struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
	int ret;

	_enter("OBJ%x,%s,", object->debug_id, object->d_name);

	/* Look up path "cache/vol/fanout/file". */
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		dentry = lookup_positive_unlocked(object->d_name, fan,
						  object->d_name_len);
	else
		dentry = ERR_PTR(ret);
	trace_cachefiles_lookup(object, fan, dentry);
	if (IS_ERR(dentry)) {
		if (dentry == ERR_PTR(-ENOENT))
			goto new_file;
		if (dentry == ERR_PTR(-EIO))
			cachefiles_io_error_obj(object, "Lookup failed");
		return false;
	}

	if (!d_is_reg(dentry)) {
		pr_err("%pd is not a file\n", dentry);
		inode_lock_nested(d_inode(fan), I_MUTEX_PARENT);
		ret = cachefiles_bury_object(volume->cache, object, fan, dentry,
					     FSCACHE_OBJECT_IS_WEIRD);
		dput(dentry);
		if (ret < 0)
			return false;
		goto new_file;
	}

	if (!cachefiles_open_file(object, dentry))
		return false;

	_leave(" = t [%lu]", file_inode(object->file)->i_ino);
	return true;

new_file:
	fscache_cookie_lookup_negative(object->cookie);
	return cachefiles_create_file(object);
}

/*
 * Attempt to link a temporary file into its rightful place in the cache.
 */
bool cachefiles_commit_tmpfile(struct cachefiles_cache *cache,
			       struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
	bool success = false;
	int ret;

	_enter(",%pD", object->file);

	inode_lock_nested(d_inode(fan), I_MUTEX_PARENT);
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		dentry = lookup_one_len(object->d_name, fan, object->d_name_len);
	else
		dentry = ERR_PTR(ret);
	if (IS_ERR(dentry)) {
		trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
					   cachefiles_trace_lookup_error);
		_debug("lookup fail %ld", PTR_ERR(dentry));
		goto out_unlock;
	}
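
	/* If the name is already occupied by something other than our file,
	 * unlink the stale occupant and repeat the lookup before linking. */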
	if (!d_is_negative(dentry)) {
		if (d_backing_inode(dentry) == file_inode(object->file)) {
			success = true;
			goto out_dput;
		}

		ret = cachefiles_unlink(volume->cache, object, fan, dentry,
					FSCACHE_OBJECT_IS_STALE);
		if (ret < 0)
			goto out_dput;

		dput(dentry);
		ret = cachefiles_inject_read_error();
		if (ret == 0)
			dentry = lookup_one_len(object->d_name, fan, object->d_name_len);
		else
			dentry = ERR_PTR(ret);
		if (IS_ERR(dentry)) {
			trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
						   cachefiles_trace_lookup_error);
			_debug("lookup fail %ld", PTR_ERR(dentry));
			goto out_unlock;
		}
	}

	ret = cachefiles_inject_read_error();
	if (ret == 0)
		ret = vfs_link(object->file->f_path.dentry, &nop_mnt_idmap,
			       d_inode(fan), dentry, NULL);
	if (ret < 0) {
		trace_cachefiles_vfs_error(object, d_inode(fan), ret,
					   cachefiles_trace_link_error);
		_debug("link fail %d", ret);
	} else {
		trace_cachefiles_link(object, file_inode(object->file));
		spin_lock(&object->lock);
		/* TODO: Do we want to switch the file pointer to the new dentry? */
		clear_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
		spin_unlock(&object->lock);
		success = true;
	}

out_dput:
	dput(dentry);
out_unlock:
	inode_unlock(d_inode(fan));
	_leave(" = %u", success);
	return success;
}

/*
 * Look up an inode to be checked or culled. Return -EBUSY if the inode is
 * marked in use.
 */
static struct dentry *cachefiles_lookup_for_cull(struct cachefiles_cache *cache,
						 struct dentry *dir,
						 char *filename)
{
	struct dentry *victim;
	int ret = -ENOENT;
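
	/* On success, dir's inode lock is left held for the caller to drop
	 * once the victim has been dealt with; it's only dropped here on the
	 * error paths. */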
	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

	victim = lookup_one_len(filename, dir, strlen(filename));
	if (IS_ERR(victim))
		goto lookup_error;
	if (d_is_negative(victim))
		goto lookup_put;
	if (d_inode(victim)->i_flags & S_KERNEL_FILE)
		goto lookup_busy;
	return victim;

lookup_busy:
	ret = -EBUSY;
lookup_put:
	inode_unlock(d_inode(dir));
	dput(victim);
	return ERR_PTR(ret);

lookup_error:
	inode_unlock(d_inode(dir));
	ret = PTR_ERR(victim);
	if (ret == -ENOENT)
		return ERR_PTR(-ESTALE); /* Probably got retired by the netfs */

	if (ret == -EIO) {
		cachefiles_io_error(cache, "Lookup failed");
	} else if (ret != -ENOMEM) {
		pr_err("Internal error: %d\n", ret);
		ret = -EIO;
	}

	return ERR_PTR(ret);
}

/*
 * Cull an object if it's not in use
 * - called only by cache manager daemon
 */
int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
		    char *filename)
{
	struct dentry *victim;
	struct inode *inode;
	int ret;

	_enter(",%pd/,%s", dir, filename);

	victim = cachefiles_lookup_for_cull(cache, dir, filename);
	if (IS_ERR(victim))
		return PTR_ERR(victim);

	/* check to see if someone is using this object */
	inode = d_inode(victim);
	inode_lock(inode);
	if (inode->i_flags & S_KERNEL_FILE) {
		ret = -EBUSY;
	} else {
		/* Stop the cache from picking it back up */
		inode->i_flags |= S_KERNEL_FILE;
		ret = 0;
	}
	inode_unlock(inode);
	if (ret < 0)
		goto error_unlock;

	ret = cachefiles_bury_object(cache, NULL, dir, victim,
				     FSCACHE_OBJECT_WAS_CULLED);
	if (ret < 0)
		goto error;

	fscache_count_culled();
	dput(victim);
	_leave(" = 0");
	return 0;

error_unlock:
	inode_unlock(d_inode(dir));
error:
	dput(victim);
	if (ret == -ENOENT)
		return -ESTALE; /* Probably got retired by the netfs */

	if (ret != -ENOMEM) {
		pr_err("Internal error: %d\n", ret);
		ret = -EIO;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * Find out if an object is in use or not
 * - called only by cache manager daemon
 * - returns -EBUSY or 0 to indicate whether an object is in use or not
 */
int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
			    char *filename)
{
	struct dentry *victim;
	int ret = 0;

	victim = cachefiles_lookup_for_cull(cache, dir, filename);
	if (IS_ERR(victim))
		return PTR_ERR(victim);
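
	/* The lookup succeeded, so the victim isn't marked in use; just drop
	 * the lock that cachefiles_lookup_for_cull() left held and put the
	 * dentry. */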
	inode_unlock(d_inode(dir));
	dput(victim);
	return ret;
}