NFS: Convert buffered read paths to use netfs when fscache is enabled
Convert the NFS buffered read code paths to the corresponding netfs APIs,
but only when fscache is configured and enabled.

The netfs API defines struct netfs_request_ops which must be filled in by
the network filesystem.  For NFS, we only need to define 5 of the
functions, the main one being the issue_read() function.  The issue_read()
function is called by the netfs layer when a read cannot be fulfilled
locally and must be sent to the server (either the cache is not active, or
it is active but the data is not available).  Once the read from the
server is complete, netfs requires a call to netfs_subreq_terminated()
which conveys either how many bytes were read successfully, or an error.
Note that issue_read() is called with a structure, netfs_io_subrequest,
which defines the IO requested, and contains a start and a length (both in
bytes), and assumes the underlying netfs will return either an error on
the whole region, or the number of bytes successfully read.

The NFS IO path is page based and the main APIs are the pgio APIs defined
in pagelist.c.  For the pgio APIs, there is no way for the caller to know
how many RPCs will be sent and how the pages will be broken up into
underlying RPCs, each of which has its own completion and return code.
In contrast, netfs is subrequest based: a single subrequest may contain
multiple pages, and a single subrequest is initiated with issue_read() and
terminated with netfs_subreq_terminated().  Thus, to utilize the netfs
APIs, NFS needs some way to accommodate the netfs API requirement of a
single response to the whole subrequest, while also minimizing disruptive
changes to the NFS pgio layer.

The approach taken with this patch is to allocate a small structure for
each nfs_netfs_issue_read() call, store the final error and the number of
bytes successfully transferred in the structure, and update these values
as each RPC completes.  The refcount on the structure is used as a marker
for the last RPC completion: it is incremented in
nfs_netfs_initiate_read() and decremented inside
nfs_netfs_read_completion(), when an nfs_pgio_header contains a valid
pointer to the data.  On the final put (which signals that the final
outstanding RPC is complete) in nfs_netfs_read_completion(), call
netfs_subreq_terminated() with either the final error value (if one or
more READs complete with an error) or the number of bytes successfully
transferred (if all RPCs complete successfully).

Note that when all RPCs complete successfully, the number of bytes
transferred is capped to the length of the subrequest.  Capping the
transferred length to the subrequest length prevents "Subreq overread"
warnings from netfs.  This is due to the "aligned_len" in
nfs_read_add_folio(), and the corner case where NFS requests a full page
at the end of the file, even when i_size reflects only a partial page
(NFS overread).

Signed-off-by: Dave Wysochanski <dwysocha@redhat.com>
Tested-by: Daire Byrne <daire@dneg.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
This commit is contained in:
parent 88a4d7bdee
commit 000dbe0bec
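The refcount and length-capping scheme described above can be distilled into a small
standalone sketch.  The names below (io_data, io_put(), rpc_complete(), subreq_terminated())
are simplified stand-ins for nfs_netfs_io_data, nfs_netfs_put(), the per-RPC read
completion, and netfs_subreq_terminated(); plain integers stand in for refcount_t and
atomic64_t.  It is only an illustration of the accounting logic, not the kernel code
itself (which follows in the diff):

    /*
     * Userspace sketch of the completion accounting used by this patch.
     * One io_data is allocated per netfs_io_subrequest; each RPC takes a
     * reference, and the final put reports either the first error seen or
     * the byte count, capped to the subrequest length to avoid netfs
     * "Subreq overread" warnings.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct io_data {
            unsigned int refcount;  /* stands in for refcount_t */
            int64_t transferred;    /* stands in for atomic64_t */
            int error;
            int64_t subreq_len;     /* length netfs asked for */
    };

    static void subreq_terminated(struct io_data *d)
    {
            /* cap to the subrequest length, as the real put helper does */
            int64_t final_len = d->transferred < d->subreq_len ?
                                d->transferred : d->subreq_len;

            printf("terminate: %lld\n",
                   (long long)(d->error ? d->error : final_len));
    }

    static void io_put(struct io_data *d)
    {
            if (--d->refcount)      /* not the last RPC completion yet */
                    return;
            subreq_terminated(d);
            free(d);
    }

    static void rpc_complete(struct io_data *d, int error, int64_t count)
    {
            if (error)
                    d->error = error;
            else
                    d->transferred += count;
            io_put(d);              /* drop the reference taken at initiate */
    }

    int main(void)
    {
            struct io_data *d = calloc(1, sizeof(*d));

            d->refcount = 1;        /* reference held by issue_read */
            d->subreq_len = 8192;

            /* two RPCs issued for one subrequest */
            d->refcount++;          /* initiate RPC 1 */
            d->refcount++;          /* initiate RPC 2 */

            rpc_complete(d, 0, 4096);
            rpc_complete(d, 0, 4096);
            io_put(d);              /* issue_read drops its own reference */
            return 0;
    }

With two 4096-byte RPCs completing for an 8192-byte subrequest, the final put reports
8192; if either RPC had failed, the error value would be reported instead.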
fs/nfs/fscache.c | 220
@@ -15,6 +15,9 @@
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/iversion.h>
+#include <linux/xarray.h>
+#include <linux/fscache.h>
+#include <linux/netfs.h>
 
 #include "internal.h"
 #include "iostat.h"
@@ -235,108 +238,153 @@ void nfs_fscache_release_file(struct inode *inode, struct file *filp)
 	fscache_unuse_cookie(cookie, &auxdata, &i_size);
 }
 
-/*
- * Fallback page reading interface.
- */
-static int fscache_fallback_read_page(struct inode *inode, struct page *page)
-{
-	struct netfs_cache_resources cres;
-	struct fscache_cookie *cookie = netfs_i_cookie(&NFS_I(inode)->netfs);
-	struct iov_iter iter;
-	struct bio_vec bvec;
-	int ret;
-
-	memset(&cres, 0, sizeof(cres));
-	bvec_set_page(&bvec, page, PAGE_SIZE, 0);
-	iov_iter_bvec(&iter, ITER_DEST, &bvec, 1, PAGE_SIZE);
-
-	ret = fscache_begin_read_operation(&cres, cookie);
-	if (ret < 0)
-		return ret;
-
-	ret = fscache_read(&cres, page_offset(page), &iter, NETFS_READ_HOLE_FAIL,
-			   NULL, NULL);
-	fscache_end_operation(&cres);
-	return ret;
-}
+int nfs_netfs_read_folio(struct file *file, struct folio *folio)
+{
+	if (!netfs_inode(folio_inode(folio))->cache)
+		return -ENOBUFS;
+
+	return netfs_read_folio(file, folio);
+}
 
-/*
- * Fallback page writing interface.
- */
-static int fscache_fallback_write_page(struct inode *inode, struct page *page,
-				       bool no_space_allocated_yet)
-{
-	struct netfs_cache_resources cres;
-	struct fscache_cookie *cookie = netfs_i_cookie(&NFS_I(inode)->netfs);
-	struct iov_iter iter;
-	struct bio_vec bvec;
-	loff_t start = page_offset(page);
-	size_t len = PAGE_SIZE;
-	int ret;
-
-	memset(&cres, 0, sizeof(cres));
-	bvec_set_page(&bvec, page, PAGE_SIZE, 0);
-	iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, PAGE_SIZE);
-
-	ret = fscache_begin_write_operation(&cres, cookie);
-	if (ret < 0)
-		return ret;
-
-	ret = cres.ops->prepare_write(&cres, &start, &len, i_size_read(inode),
-				      no_space_allocated_yet);
-	if (ret == 0)
-		ret = fscache_write(&cres, page_offset(page), &iter, NULL, NULL);
-	fscache_end_operation(&cres);
-	return ret;
-}
+int nfs_netfs_readahead(struct readahead_control *ractl)
+{
+	struct inode *inode = ractl->mapping->host;
+
+	if (!netfs_inode(inode)->cache)
+		return -ENOBUFS;
+
+	netfs_readahead(ractl);
+	return 0;
+}
 
-/*
- * Retrieve a page from fscache
- */
-int __nfs_fscache_read_page(struct inode *inode, struct page *page)
-{
-	int ret;
-
-	trace_nfs_fscache_read_page(inode, page);
-	if (PageChecked(page)) {
-		ClearPageChecked(page);
-		ret = 1;
-		goto out;
-	}
-
-	ret = fscache_fallback_read_page(inode, page);
-	if (ret < 0) {
-		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL);
-		SetPageChecked(page);
-		goto out;
-	}
-
-	/* Read completed synchronously */
-	nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK);
-	SetPageUptodate(page);
-	ret = 0;
-out:
-	trace_nfs_fscache_read_page_exit(inode, page, ret);
-	return ret;
-}
+atomic_t nfs_netfs_debug_id;
+static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *file)
+{
+	rreq->netfs_priv = get_nfs_open_context(nfs_file_open_context(file));
+	rreq->debug_id = atomic_inc_return(&nfs_netfs_debug_id);
+
+	return 0;
+}
+
+static void nfs_netfs_free_request(struct netfs_io_request *rreq)
+{
+	put_nfs_open_context(rreq->netfs_priv);
+}
+
+static inline int nfs_netfs_begin_cache_operation(struct netfs_io_request *rreq)
+{
+	return fscache_begin_read_operation(&rreq->cache_resources,
+					    netfs_i_cookie(netfs_inode(rreq->inode)));
+}
+
+static struct nfs_netfs_io_data *nfs_netfs_alloc(struct netfs_io_subrequest *sreq)
+{
+	struct nfs_netfs_io_data *netfs;
+
+	netfs = kzalloc(sizeof(*netfs), GFP_KERNEL_ACCOUNT);
+	if (!netfs)
+		return NULL;
+	netfs->sreq = sreq;
+	refcount_set(&netfs->refcount, 1);
+	return netfs;
+}
+
+static bool nfs_netfs_clamp_length(struct netfs_io_subrequest *sreq)
+{
+	size_t rsize = NFS_SB(sreq->rreq->inode->i_sb)->rsize;
+
+	sreq->len = min(sreq->len, rsize);
+	return true;
+}
+
+static void nfs_netfs_issue_read(struct netfs_io_subrequest *sreq)
+{
+	struct nfs_netfs_io_data *netfs;
+	struct nfs_pageio_descriptor pgio;
+	struct inode *inode = sreq->rreq->inode;
+	struct nfs_open_context *ctx = sreq->rreq->netfs_priv;
+	struct page *page;
+	int err;
+	pgoff_t start = (sreq->start + sreq->transferred) >> PAGE_SHIFT;
+	pgoff_t last = ((sreq->start + sreq->len -
+			 sreq->transferred - 1) >> PAGE_SHIFT);
+	XA_STATE(xas, &sreq->rreq->mapping->i_pages, start);
+
+	nfs_pageio_init_read(&pgio, inode, false,
+			     &nfs_async_read_completion_ops);
+
+	netfs = nfs_netfs_alloc(sreq);
+	if (!netfs)
+		return netfs_subreq_terminated(sreq, -ENOMEM, false);
+
+	pgio.pg_netfs = netfs; /* used in completion */
+
+	xas_lock(&xas);
+	xas_for_each(&xas, page, last) {
+		/* nfs_read_add_folio() may schedule() due to pNFS layout and other RPCs */
+		xas_pause(&xas);
+		xas_unlock(&xas);
+		err = nfs_read_add_folio(&pgio, ctx, page_folio(page));
+		if (err < 0) {
+			netfs->error = err;
+			goto out;
+		}
+		xas_lock(&xas);
+	}
+	xas_unlock(&xas);
+out:
+	nfs_pageio_complete_read(&pgio);
+	nfs_netfs_put(netfs);
+}
 
-/*
- * Store a newly fetched page in fscache. We can be certain there's no page
- * stored in the cache as yet otherwise we would've read it from there.
- */
-void __nfs_fscache_write_page(struct inode *inode, struct page *page)
-{
-	int ret;
-
-	trace_nfs_fscache_write_page(inode, page);
-
-	ret = fscache_fallback_write_page(inode, page, true);
-
-	if (ret != 0) {
-		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL);
-		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_UNCACHED);
-	} else {
-		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_WRITTEN_OK);
-	}
-	trace_nfs_fscache_write_page_exit(inode, page, ret);
-}
+void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr)
+{
+	struct nfs_netfs_io_data *netfs = hdr->netfs;
+
+	if (!netfs)
+		return;
+
+	nfs_netfs_get(netfs);
+}
+
+int nfs_netfs_folio_unlock(struct folio *folio)
+{
+	struct inode *inode = folio_file_mapping(folio)->host;
+
+	/*
+	 * If fscache is enabled, netfs will unlock pages.
+	 */
+	if (netfs_inode(inode)->cache)
+		return 0;
+
+	return 1;
+}
+
+void nfs_netfs_read_completion(struct nfs_pgio_header *hdr)
+{
+	struct nfs_netfs_io_data *netfs = hdr->netfs;
+	struct netfs_io_subrequest *sreq;
+
+	if (!netfs)
+		return;
+
+	sreq = netfs->sreq;
+	if (test_bit(NFS_IOHDR_EOF, &hdr->flags))
+		__set_bit(NETFS_SREQ_CLEAR_TAIL, &sreq->flags);
+
+	if (hdr->error)
+		netfs->error = hdr->error;
+	else
+		atomic64_add(hdr->res.count, &netfs->transferred);
+
+	nfs_netfs_put(netfs);
+	hdr->netfs = NULL;
+}
+
+const struct netfs_request_ops nfs_netfs_ops = {
+	.init_request		= nfs_netfs_init_request,
+	.free_request		= nfs_netfs_free_request,
+	.begin_cache_operation	= nfs_netfs_begin_cache_operation,
+	.issue_read		= nfs_netfs_issue_read,
+	.clamp_length		= nfs_netfs_clamp_length
+};
fs/nfs/fscache.h | 122
@@ -34,6 +34,58 @@ struct nfs_fscache_inode_auxdata {
 	u64	change_attr;
 };
 
+struct nfs_netfs_io_data {
+	/*
+	 * NFS may split a netfs_io_subrequest into multiple RPCs, each
+	 * with their own read completion.  In netfs, we can only call
+	 * netfs_subreq_terminated() once for each subrequest.  Use the
+	 * refcount here to double as a marker of the last RPC completion,
+	 * and only call netfs via netfs_subreq_terminated() once.
+	 */
+	refcount_t refcount;
+	struct netfs_io_subrequest *sreq;
+
+	/*
+	 * Final disposition of the netfs_io_subrequest, sent in
+	 * netfs_subreq_terminated()
+	 */
+	atomic64_t transferred;
+	int error;
+};
+
+static inline void nfs_netfs_get(struct nfs_netfs_io_data *netfs)
+{
+	refcount_inc(&netfs->refcount);
+}
+
+static inline void nfs_netfs_put(struct nfs_netfs_io_data *netfs)
+{
+	ssize_t final_len;
+
+	/* Only the last RPC completion should call netfs_subreq_terminated() */
+	if (!refcount_dec_and_test(&netfs->refcount))
+		return;
+
+	/*
+	 * The NFS pageio interface may read a complete page, even when netfs
+	 * only asked for a partial page.  Specifically, this may be seen when
+	 * one thread is truncating a file while another one is reading the last
+	 * page of the file.
+	 * Correct the final length here to be no larger than the netfs subrequest
+	 * length, and thus avoid netfs's "Subreq overread" warning message.
+	 */
+	final_len = min_t(s64, netfs->sreq->len, atomic64_read(&netfs->transferred));
+	netfs_subreq_terminated(netfs->sreq, netfs->error ?: final_len, false);
+	kfree(netfs);
+}
+static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi)
+{
+	netfs_inode_init(&nfsi->netfs, &nfs_netfs_ops);
+}
+extern void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr);
+extern void nfs_netfs_read_completion(struct nfs_pgio_header *hdr);
+extern int nfs_netfs_folio_unlock(struct folio *folio);
+
 /*
  * fscache.c
  */
@@ -44,9 +96,8 @@ extern void nfs_fscache_init_inode(struct inode *);
 extern void nfs_fscache_clear_inode(struct inode *);
 extern void nfs_fscache_open_file(struct inode *, struct file *);
 extern void nfs_fscache_release_file(struct inode *, struct file *);
-
-extern int __nfs_fscache_read_page(struct inode *, struct page *);
-extern void __nfs_fscache_write_page(struct inode *, struct page *);
+extern int nfs_netfs_readahead(struct readahead_control *ractl);
+extern int nfs_netfs_read_folio(struct file *file, struct folio *folio);
 
 static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
 {
@@ -54,34 +105,11 @@ static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
 		if (current_is_kswapd() || !(gfp & __GFP_FS))
 			return false;
 		folio_wait_fscache(folio);
-		fscache_note_page_release(netfs_i_cookie(&NFS_I(folio->mapping->host)->netfs));
-		nfs_inc_fscache_stats(folio->mapping->host,
-				      NFSIOS_FSCACHE_PAGES_UNCACHED);
 	}
+	fscache_note_page_release(netfs_i_cookie(netfs_inode(folio->mapping->host)));
 	return true;
 }
 
-/*
- * Retrieve a page from an inode data storage object.
- */
-static inline int nfs_fscache_read_page(struct inode *inode, struct page *page)
-{
-	if (netfs_inode(inode)->cache)
-		return __nfs_fscache_read_page(inode, page);
-	return -ENOBUFS;
-}
-
-/*
- * Store a page newly fetched from the server in an inode data storage object
- * in the cache.
- */
-static inline void nfs_fscache_write_page(struct inode *inode,
-					  struct page *page)
-{
-	if (netfs_inode(inode)->cache)
-		__nfs_fscache_write_page(inode, page);
-}
-
 static inline void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *auxdata,
 					      struct inode *inode)
 {
@@ -117,7 +145,28 @@ static inline const char *nfs_server_fscache_state(struct nfs_server *server)
 	return "no ";
 }
 
+static inline void nfs_netfs_set_pgio_header(struct nfs_pgio_header *hdr,
+					     struct nfs_pageio_descriptor *desc)
+{
+	hdr->netfs = desc->pg_netfs;
+}
+static inline void nfs_netfs_set_pageio_descriptor(struct nfs_pageio_descriptor *desc,
+						   struct nfs_pgio_header *hdr)
+{
+	desc->pg_netfs = hdr->netfs;
+}
+static inline void nfs_netfs_reset_pageio_descriptor(struct nfs_pageio_descriptor *desc)
+{
+	desc->pg_netfs = NULL;
+}
 #else /* CONFIG_NFS_FSCACHE */
+static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi) {}
+static inline void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr) {}
+static inline void nfs_netfs_read_completion(struct nfs_pgio_header *hdr) {}
+static inline int nfs_netfs_folio_unlock(struct folio *folio)
+{
+	return 1;
+}
 static inline void nfs_fscache_release_super_cookie(struct super_block *sb) {}
 
 static inline void nfs_fscache_init_inode(struct inode *inode) {}
@@ -125,22 +174,29 @@ static inline void nfs_fscache_clear_inode(struct inode *inode) {}
 static inline void nfs_fscache_open_file(struct inode *inode,
 					 struct file *filp) {}
 static inline void nfs_fscache_release_file(struct inode *inode, struct file *file) {}
+static inline int nfs_netfs_readahead(struct readahead_control *ractl)
+{
+	return -ENOBUFS;
+}
+static inline int nfs_netfs_read_folio(struct file *file, struct folio *folio)
+{
+	return -ENOBUFS;
+}
 
 static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
 {
 	return true; /* may release folio */
 }
-static inline int nfs_fscache_read_page(struct inode *inode, struct page *page)
-{
-	return -ENOBUFS;
-}
-static inline void nfs_fscache_write_page(struct inode *inode, struct page *page) {}
 static inline void nfs_fscache_invalidate(struct inode *inode, int flags) {}
 
 static inline const char *nfs_server_fscache_state(struct nfs_server *server)
 {
 	return "no ";
 }
 
+static inline void nfs_netfs_set_pgio_header(struct nfs_pgio_header *hdr,
+					     struct nfs_pageio_descriptor *desc) {}
+static inline void nfs_netfs_set_pageio_descriptor(struct nfs_pageio_descriptor *desc,
+						   struct nfs_pgio_header *hdr) {}
+static inline void nfs_netfs_reset_pageio_descriptor(struct nfs_pageio_descriptor *desc) {}
 #endif /* CONFIG_NFS_FSCACHE */
 #endif /* _NFS_FSCACHE_H */
fs/nfs/inode.c

@@ -2254,6 +2254,8 @@ struct inode *nfs_alloc_inode(struct super_block *sb)
 #ifdef CONFIG_NFS_V4_2
 	nfsi->xattr_cache = NULL;
 #endif
+	nfs_netfs_inode_init(nfsi);
+
 	return &nfsi->vfs_inode;
 }
 EXPORT_SYMBOL_GPL(nfs_alloc_inode);
fs/nfs/internal.h

@@ -452,6 +452,10 @@ extern void nfs_sb_deactive(struct super_block *sb);
 extern int nfs_client_for_each_server(struct nfs_client *clp,
 				      int (*fn)(struct nfs_server *, void *),
 				      void *data);
+#ifdef CONFIG_NFS_FSCACHE
+extern const struct netfs_request_ops nfs_netfs_ops;
+#endif
+
 /* io.c */
 extern void nfs_start_io_read(struct inode *inode);
 extern void nfs_end_io_read(struct inode *inode);
@@ -481,9 +485,14 @@ extern int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh, bool
 
 struct nfs_pgio_completion_ops;
 /* read.c */
+extern const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
 extern void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
 				 struct inode *inode, bool force_mds,
 				 const struct nfs_pgio_completion_ops *compl_ops);
+extern int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
+			      struct nfs_open_context *ctx,
+			      struct folio *folio);
+extern void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio);
 extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
 extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
fs/nfs/pagelist.c

@@ -26,6 +26,7 @@
 #include "internal.h"
 #include "pnfs.h"
 #include "nfstrace.h"
+#include "fscache.h"
 
 #define NFSDBG_FACILITY		NFSDBG_PAGECACHE
 
@@ -105,6 +106,7 @@ void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
 	hdr->good_bytes = mirror->pg_count;
 	hdr->io_completion = desc->pg_io_completion;
 	hdr->dreq = desc->pg_dreq;
+	nfs_netfs_set_pgio_header(hdr, desc);
 	hdr->release = release;
 	hdr->completion_ops = desc->pg_completion_ops;
 	if (hdr->completion_ops->init_hdr)
@@ -941,6 +943,7 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
 	desc->pg_lseg = NULL;
 	desc->pg_io_completion = NULL;
 	desc->pg_dreq = NULL;
+	nfs_netfs_reset_pageio_descriptor(desc);
 	desc->pg_bsize = bsize;
 
 	desc->pg_mirror_count = 1;
@@ -1477,6 +1480,7 @@ int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
 
 	desc->pg_io_completion = hdr->io_completion;
 	desc->pg_dreq = hdr->dreq;
+	nfs_netfs_set_pageio_descriptor(desc, hdr);
 	list_splice_init(&hdr->pages, &pages);
 	while (!list_empty(&pages)) {
 		struct nfs_page *req = nfs_list_entry(pages.next);
fs/nfs/read.c

@@ -31,7 +31,7 @@
 
 #define NFSDBG_FACILITY		NFSDBG_PAGECACHE
 
-static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
+const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
 static const struct nfs_rw_ops nfs_rw_read_ops;
 
 static struct kmem_cache *nfs_rdata_cachep;
@@ -74,7 +74,7 @@ void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
 }
 EXPORT_SYMBOL_GPL(nfs_pageio_init_read);
 
-static void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio)
+void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio)
 {
 	struct nfs_pgio_mirror *pgm;
 	unsigned long npages;
@@ -110,20 +110,14 @@ EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
 
 static void nfs_readpage_release(struct nfs_page *req, int error)
 {
-	struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
 	struct folio *folio = nfs_page_to_folio(req);
 
-	dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
-		(unsigned long long)NFS_FILEID(inode), req->wb_bytes,
-		(long long)req_offset(req));
-
 	if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
 		folio_set_error(folio);
-	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
-		if (folio_test_uptodate(folio))
-			nfs_fscache_write_page(inode, &folio->page);
-		folio_unlock(folio);
-	}
+	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE))
+		if (nfs_netfs_folio_unlock(folio))
+			folio_unlock(folio);
 
 	nfs_release_request(req);
 }
@@ -177,6 +171,8 @@ static void nfs_read_completion(struct nfs_pgio_header *hdr)
 		nfs_list_remove_request(req);
 		nfs_readpage_release(req, error);
 	}
+	nfs_netfs_read_completion(hdr);
+
 out:
 	hdr->release(hdr);
 }
@@ -187,6 +183,7 @@ static void nfs_initiate_read(struct nfs_pgio_header *hdr,
 			      struct rpc_task_setup *task_setup_data, int how)
 {
 	rpc_ops->read_setup(hdr, msg);
+	nfs_netfs_initiate_read(hdr);
 	trace_nfs_initiate_read(hdr);
 }
 
@@ -202,7 +199,7 @@ nfs_async_read_error(struct list_head *head, int error)
 	}
 }
 
-static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
+const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
 	.error_cleanup = nfs_async_read_error,
 	.completion = nfs_read_completion,
 };
@@ -277,9 +274,9 @@ static void nfs_readpage_result(struct rpc_task *task,
 		nfs_readpage_retry(task, hdr);
 }
 
-static int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
-			      struct nfs_open_context *ctx,
-			      struct folio *folio)
+int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
+		       struct nfs_open_context *ctx,
+		       struct folio *folio)
 {
 	struct inode *inode = folio_file_mapping(folio)->host;
 	struct nfs_server *server = NFS_SERVER(inode);
@@ -295,15 +292,11 @@ static int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
 
 	aligned_len = min_t(unsigned int, ALIGN(len, rsize), fsize);
 
-	if (!IS_SYNC(inode)) {
-		error = nfs_fscache_read_page(inode, &folio->page);
-		if (error == 0)
-			goto out_unlock;
-	}
-
 	new = nfs_page_create_from_folio(ctx, folio, 0, aligned_len);
-	if (IS_ERR(new))
-		goto out_error;
+	if (IS_ERR(new)) {
+		error = PTR_ERR(new);
+		goto out;
+	}
 
 	if (len < fsize)
 		folio_zero_segment(folio, len, fsize);
@@ -314,10 +307,6 @@ static int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
 		goto out;
 	}
 	return 0;
-out_error:
-	error = PTR_ERR(new);
-out_unlock:
-	folio_unlock(folio);
 out:
 	return error;
 }
@@ -356,6 +345,10 @@ int nfs_read_folio(struct file *file, struct folio *folio)
 	if (NFS_STALE(inode))
 		goto out_unlock;
 
+	ret = nfs_netfs_read_folio(file, folio);
+	if (!ret)
+		goto out;
+
 	ctx = get_nfs_open_context(nfs_file_open_context(file));
 
 	xchg(&ctx->error, 0);
@@ -364,7 +357,7 @@ int nfs_read_folio(struct file *file, struct folio *folio)
 
 	ret = nfs_read_add_folio(&pgio, ctx, folio);
 	if (ret)
-		goto out;
+		goto out_put;
 
 	nfs_pageio_complete_read(&pgio);
 	ret = pgio.pg_error < 0 ? pgio.pg_error : 0;
@@ -373,14 +366,14 @@ int nfs_read_folio(struct file *file, struct folio *folio)
 		if (!folio_test_uptodate(folio) && !ret)
 			ret = xchg(&ctx->error, 0);
 	}
-out:
+out_put:
 	put_nfs_open_context(ctx);
+out:
 	trace_nfs_aop_readpage_done(inode, folio, ret);
 	return ret;
 out_unlock:
 	folio_unlock(folio);
-	trace_nfs_aop_readpage_done(inode, folio, ret);
-	return ret;
+	goto out;
 }
 
 void nfs_readahead(struct readahead_control *ractl)
@@ -401,6 +394,10 @@ void nfs_readahead(struct readahead_control *ractl)
 	if (NFS_STALE(inode))
 		goto out;
 
+	ret = nfs_netfs_readahead(ractl);
+	if (!ret)
+		goto out;
+
 	if (file == NULL) {
 		ret = -EBADF;
 		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
include/linux/nfs_page.h

@@ -105,6 +105,9 @@ struct nfs_pageio_descriptor {
 	struct pnfs_layout_segment *pg_lseg;
 	struct nfs_io_completion *pg_io_completion;
 	struct nfs_direct_req	*pg_dreq;
+#ifdef CONFIG_NFS_FSCACHE
+	void			*pg_netfs;
+#endif
 	unsigned int		pg_bsize;	/* default bsize for mirrors */
 
 	u32			pg_mirror_count;
|
@ -1619,6 +1619,9 @@ struct nfs_pgio_header {
|
||||
const struct nfs_rw_ops *rw_ops;
|
||||
struct nfs_io_completion *io_completion;
|
||||
struct nfs_direct_req *dreq;
|
||||
#ifdef CONFIG_NFS_FSCACHE
|
||||
void *netfs;
|
||||
#endif
|
||||
|
||||
int pnfs_error;
|
||||
int error; /* merge with pnfs_error */
|
||||
|