f008b1d6e1
Merge tag 'netfs-prep-20220318' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs

Pull netfs updates from David Howells:
 "Netfs prep for write helpers.

  Having had a go at implementing write helpers and content encryption
  support in netfslib, it seems that the netfs_read_{,sub}request structs
  and the equivalent write request structs were almost the same and so
  should be merged, thereby requiring only one set of alloc/get/put
  functions and a common set of tracepoints.

  Merging the structs also has the advantage that if a bounce buffer is
  added to the request struct, a read operation can be performed to fill
  the bounce buffer, the contents of the buffer can be modified and then
  a write operation can be performed on it to send the data wherever it
  needs to go using the same request structure all the way through. The
  I/O handlers would then transparently perform any required crypto.
  This should make it easier to perform RMW cycles if needed.

  The potentially common functions and structs, however, by their names
  all proclaim themselves to be associated with the read side of things.
  The bulk of these changes alter this in the following ways:

   - Rename struct netfs_read_{,sub}request to netfs_io_{,sub}request.
   - Rename some enums, members and flags to make them more appropriate.
   - Adjust some comments to match.
   - Drop "read"/"rreq" from the names of common functions. For instance,
     netfs_get_read_request() becomes netfs_get_request().
   - The ->init_rreq() and ->issue_op() methods become ->init_request()
     and ->issue_read(). I've kept the latter as a read-specific function
     and in another branch added an ->issue_write() method.

  The driver source is then reorganised into a number of files:

        fs/netfs/buffered_read.c    Create read reqs to the pagecache
        fs/netfs/io.c               Dispatchers for read and write reqs
        fs/netfs/main.c             Some general miscellaneous bits
        fs/netfs/objects.c          Alloc, get and put functions
        fs/netfs/stats.c            Optional procfs statistics

  and future development can be fitted into this scheme, e.g.:

        fs/netfs/buffered_write.c   Modify the pagecache
        fs/netfs/buffered_flush.c   Writeback from the pagecache
        fs/netfs/direct_read.c      DIO read support
        fs/netfs/direct_write.c     DIO write support
        fs/netfs/unbuffered_write.c Write modifications directly back

  Beyond the above changes, there are also some changes that affect how
  things work:

   - Make fscache_end_operation() generally available.

   - In the netfs tracing header, generate enums from the symbol ->
     string mapping tables rather than manually coding them.

   - Add a struct for filesystems that use netfslib to put into their
     inode wrapper structs to hold extra state that netfslib is
     interested in, such as the fscache cookie. This allows netfslib
     functions to be set in filesystem operation tables and jumped to
     directly without having to have a filesystem wrapper. (A sketch of
     this embedding follows the patch list below.)

   - Add a member to the struct added above to track the remote inode
     length as that may differ if local modifications are buffered. We
     may need to supply an appropriate EOF pointer when storing data (in
     AFS for example).

   - Pass extra information to netfs_alloc_request() so that the
     ->init_request() hook can access it and retain information to
     indicate the origin of the operation.

   - Make the ->init_request() hook return an error, thereby allowing a
     filesystem that isn't allowed to cache an inode (ceph or cifs, for
     example) to skip readahead.

   - Switch to using refcount_t for subrequests and add tracepoints to
     log refcount changes for the request and subrequest structs.

   - Add a function to consolidate dispatching a read request. Similar
     code is used in three places and another couple are likely to be
     added in the future"

Link: https://lore.kernel.org/all/2639515.1648483225@warthog.procyon.org.uk/

* tag 'netfs-prep-20220318' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs:
  afs: Maintain netfs_i_context::remote_i_size
  netfs: Keep track of the actual remote file size
  netfs: Split some core bits out into their own file
  netfs: Split fs/netfs/read_helper.c
  netfs: Rename read_helper.c to io.c
  netfs: Prepare to split read_helper.c
  netfs: Add a function to consolidate beginning a read
  netfs: Add a netfs inode context
  ceph: Make ceph_init_request() check caps on readahead
  netfs: Change ->init_request() to return an error code
  netfs: Refactor arguments for netfs_alloc_read_request
  netfs: Adjust the netfs_failure tracepoint to indicate non-subreq lines
  netfs: Trace refcounting on the netfs_io_subrequest struct
  netfs: Trace refcounting on the netfs_io_request struct
  netfs: Adjust the netfs_rreq tracepoint slightly
  netfs: Split netfs_io_* object handling out
  netfs: Finish off rename of netfs_read_request to netfs_io_request
  netfs: Rename netfs_read_*request to netfs_io_*request
  netfs: Generate enums from trace symbol mapping lists
  fscache: export fscache_end_operation()
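A minimal sketch of the inode-context embedding described above. The names netfs_i_context and remote_i_size come from the patch titles in this merge; the requirement that the context sit directly after the VFS inode, the helper netfs_i_context_init(), and everything prefixed "myfs" are illustrative assumptions rather than material from this merge:

#include <linux/netfs.h>

/*
 * Hypothetical inode wrapper for a filesystem ("myfs") using netfslib.
 * The context carries the state netfslib wants per inode (ops table,
 * fscache cookie, remote_i_size).
 */
struct myfs_inode {
	struct {
		struct inode		vfs_inode;
		struct netfs_i_context	netfs_ctx;	/* assumed: must directly follow vfs_inode */
	};
	/* ... filesystem-private fields ... */
};

/* Assumed hook: wire up the context when the inode is initialised. */
static void myfs_init_netfs_context(struct inode *inode)
{
	netfs_i_context_init(inode, &myfs_netfs_request_ops);	/* ops table assumed defined elsewhere */
}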
350 lines
9.4 KiB
C
// SPDX-License-Identifier: GPL-2.0-or-later
/* NFS filesystem cache interface
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_fs_sb.h>
#include <linux/in6.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/iversion.h>

#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "nfstrace.h"

#define NFS_MAX_KEY_LEN 1000

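/*
 * Append a comma-separated hex component to the cache key, failing if the
 * key would overrun NFS_MAX_KEY_LEN.  A zero value is recorded as a bare
 * comma to keep the key short.
 */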
static bool nfs_append_int(char *key, int *_len, unsigned long long x)
{
	if (*_len > NFS_MAX_KEY_LEN)
		return false;
	if (x == 0)
		key[(*_len)++] = ',';
	else
		*_len += sprintf(key + *_len, ",%llx", x);
	return true;
}

/*
 * Get the per-client index cookie for an NFS client if the appropriate mount
 * flag was set
 * - We always try and get an index cookie for the client, but get filehandle
 *   cookies on a per-superblock basis, depending on the mount flags
 */
static bool nfs_fscache_get_client_key(struct nfs_client *clp,
				       char *key, int *_len)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &clp->cl_addr;
	const struct sockaddr_in *sin = (struct sockaddr_in *) &clp->cl_addr;

	*_len += snprintf(key + *_len, NFS_MAX_KEY_LEN - *_len,
			  ",%u.%u,%x",
			  clp->rpc_ops->version,
			  clp->cl_minorversion,
			  clp->cl_addr.ss_family);

	switch (clp->cl_addr.ss_family) {
	case AF_INET:
		if (!nfs_append_int(key, _len, sin->sin_port) ||
		    !nfs_append_int(key, _len, sin->sin_addr.s_addr))
			return false;
		return true;

	case AF_INET6:
		if (!nfs_append_int(key, _len, sin6->sin6_port) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[0]) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[1]) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[2]) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[3]))
			return false;
		return true;

	default:
		printk(KERN_WARNING "NFS: Unknown network family '%d'\n",
		       clp->cl_addr.ss_family);
		return false;
	}
}

/*
 * Get the cache cookie for an NFS superblock.
 *
 * The default uniquifier is just an empty string, but it may be overridden
 * either by the 'fsc=xxx' option to mount, or by inheriting it from the parent
 * superblock across an automount point of some nature.
 */
int nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int ulen)
{
	struct fscache_volume *vcookie;
	struct nfs_server *nfss = NFS_SB(sb);
	unsigned int len = 3;
	char *key;

	if (uniq) {
		nfss->fscache_uniq = kmemdup_nul(uniq, ulen, GFP_KERNEL);
		if (!nfss->fscache_uniq)
			return -ENOMEM;
	}

	key = kmalloc(NFS_MAX_KEY_LEN + 24, GFP_KERNEL);
	if (!key)
		return -ENOMEM;

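	/* Assemble the volume key: the "nfs" prefix, then the per-client key,
	 * then the per-superblock parameters, each appended as a hex field.
	 */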
	memcpy(key, "nfs", 3);
	if (!nfs_fscache_get_client_key(nfss->nfs_client, key, &len) ||
	    !nfs_append_int(key, &len, nfss->fsid.major) ||
	    !nfs_append_int(key, &len, nfss->fsid.minor) ||
	    !nfs_append_int(key, &len, sb->s_flags & NFS_SB_MASK) ||
	    !nfs_append_int(key, &len, nfss->flags) ||
	    !nfs_append_int(key, &len, nfss->rsize) ||
	    !nfs_append_int(key, &len, nfss->wsize) ||
	    !nfs_append_int(key, &len, nfss->acregmin) ||
	    !nfs_append_int(key, &len, nfss->acregmax) ||
	    !nfs_append_int(key, &len, nfss->acdirmin) ||
	    !nfs_append_int(key, &len, nfss->acdirmax) ||
	    !nfs_append_int(key, &len, nfss->client->cl_auth->au_flavor))
		goto out;

	if (ulen > 0) {
		if (ulen > NFS_MAX_KEY_LEN - len)
			goto out;
		key[len++] = ',';
		memcpy(key + len, uniq, ulen);
		len += ulen;
	}
	key[len] = 0;

	/* create a cache index for looking up filehandles */
	vcookie = fscache_acquire_volume(key,
					 NULL, /* preferred_cache */
					 NULL, 0 /* coherency_data */);
	if (IS_ERR(vcookie)) {
		if (vcookie != ERR_PTR(-EBUSY)) {
			kfree(key);
			return PTR_ERR(vcookie);
		}
		pr_err("NFS: Cache volume key already in use (%s)\n", key);
		vcookie = NULL;
	}
	nfss->fscache = vcookie;

out:
	kfree(key);
	return 0;
}

/*
 * release a per-superblock cookie
 */
void nfs_fscache_release_super_cookie(struct super_block *sb)
{
	struct nfs_server *nfss = NFS_SB(sb);

	fscache_relinquish_volume(nfss->fscache, NULL, false);
	nfss->fscache = NULL;
	kfree(nfss->fscache_uniq);
}

/*
 * Initialise the per-inode cache cookie pointer for an NFS inode.
 */
void nfs_fscache_init_inode(struct inode *inode)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct nfs_server *nfss = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);

	nfsi->fscache = NULL;
	if (!(nfss->fscache && S_ISREG(inode->i_mode)))
		return;

	nfs_fscache_update_auxdata(&auxdata, inode);

	nfsi->fscache = fscache_acquire_cookie(NFS_SB(inode->i_sb)->fscache,
					       0,
					       nfsi->fh.data, /* index_key */
					       nfsi->fh.size,
					       &auxdata, /* aux_data */
					       sizeof(auxdata),
					       i_size_read(inode));
}

/*
 * Release a per-inode cookie.
 */
void nfs_fscache_clear_inode(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct fscache_cookie *cookie = nfs_i_fscache(inode);

	fscache_relinquish_cookie(cookie, false);
	nfsi->fscache = NULL;
}

/*
 * Enable or disable caching for a file that is being opened as appropriate.
 * The cookie is allocated when the inode is initialised, but is not enabled at
 * that time.  Enablement is deferred to file-open time to avoid stat() and
 * access() thrashing the cache.
 *
 * For now, with NFS, only regular files that are open read-only will be able
 * to use the cache.
 *
 * We enable the cache for an inode if we open it read-only and it isn't
 * currently open for writing.  We disable the cache if the inode is open
 * write-only.
 *
 * The caller uses the file struct to pin i_writecount on the inode before
 * calling us when a file is opened for writing, so we can make use of that.
 *
 * Note that this may be invoked multiple times in parallel by parallel
 * nfs_open() functions.
 */
void nfs_fscache_open_file(struct inode *inode, struct file *filp)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct fscache_cookie *cookie = nfs_i_fscache(inode);
	bool open_for_write = inode_is_open_for_write(inode);

	if (!fscache_cookie_valid(cookie))
		return;

	fscache_use_cookie(cookie, open_for_write);
	if (open_for_write) {
		nfs_fscache_update_auxdata(&auxdata, inode);
		fscache_invalidate(cookie, &auxdata, i_size_read(inode),
				   FSCACHE_INVAL_DIO_WRITE);
	}
}
EXPORT_SYMBOL_GPL(nfs_fscache_open_file);

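/*
 * Release our use of the fscache cookie when a file is closed, handing back
 * up-to-date coherency data for the cache to store with the cookie.
 */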
void nfs_fscache_release_file(struct inode *inode, struct file *filp)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct fscache_cookie *cookie = nfs_i_fscache(inode);

	if (fscache_cookie_valid(cookie)) {
		nfs_fscache_update_auxdata(&auxdata, inode);
		fscache_unuse_cookie(cookie, &auxdata, NULL);
	}
}

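/*
 * The fallback routines below drive the cache directly, one page at a time,
 * through the netfs cache resources API rather than through the netfs read
 * helpers.
 */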
/*
 * Fallback page reading interface.
 */
static int fscache_fallback_read_page(struct inode *inode, struct page *page)
{
	struct netfs_cache_resources cres;
	struct fscache_cookie *cookie = nfs_i_fscache(inode);
	struct iov_iter iter;
	struct bio_vec bvec[1];
	int ret;

	memset(&cres, 0, sizeof(cres));
	bvec[0].bv_page = page;
	bvec[0].bv_offset = 0;
	bvec[0].bv_len = PAGE_SIZE;
	iov_iter_bvec(&iter, READ, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);

	ret = fscache_begin_read_operation(&cres, cookie);
	if (ret < 0)
		return ret;

	ret = fscache_read(&cres, page_offset(page), &iter, NETFS_READ_HOLE_FAIL,
			   NULL, NULL);
	fscache_end_operation(&cres);
	return ret;
}

/*
 * Fallback page writing interface.
 */
static int fscache_fallback_write_page(struct inode *inode, struct page *page,
				       bool no_space_allocated_yet)
{
	struct netfs_cache_resources cres;
	struct fscache_cookie *cookie = nfs_i_fscache(inode);
	struct iov_iter iter;
	struct bio_vec bvec[1];
	loff_t start = page_offset(page);
	size_t len = PAGE_SIZE;
	int ret;

	memset(&cres, 0, sizeof(cres));
	bvec[0].bv_page = page;
	bvec[0].bv_offset = 0;
	bvec[0].bv_len = PAGE_SIZE;
	iov_iter_bvec(&iter, WRITE, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);

	ret = fscache_begin_write_operation(&cres, cookie);
	if (ret < 0)
		return ret;

	ret = cres.ops->prepare_write(&cres, &start, &len, i_size_read(inode),
				      no_space_allocated_yet);
	if (ret == 0)
		ret = fscache_write(&cres, page_offset(page), &iter, NULL, NULL);
	fscache_end_operation(&cres);
	return ret;
}

/*
 * Retrieve a page from fscache
 */
int __nfs_fscache_read_page(struct inode *inode, struct page *page)
{
	int ret;

	trace_nfs_fscache_read_page(inode, page);
	if (PageChecked(page)) {
		ClearPageChecked(page);
		ret = 1;
		goto out;
	}

	ret = fscache_fallback_read_page(inode, page);
	if (ret < 0) {
		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL);
		SetPageChecked(page);
		goto out;
	}

	/* Read completed synchronously */
	nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK);
	SetPageUptodate(page);
	ret = 0;
out:
	trace_nfs_fscache_read_page_exit(inode, page, ret);
	return ret;
}

/*
 * Store a newly fetched page in fscache.  We can be certain there's no page
 * stored in the cache as yet otherwise we would've read it from there.
 */
void __nfs_fscache_write_page(struct inode *inode, struct page *page)
{
	int ret;

	trace_nfs_fscache_write_page(inode, page);

	ret = fscache_fallback_write_page(inode, page, true);

	if (ret != 0) {
		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL);
		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_UNCACHED);
	} else {
		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_WRITTEN_OK);
	}
	trace_nfs_fscache_write_page_exit(inode, page, ret);
}