cifs: Implement netfslib hooks
Provide implementation of the netfslib hooks that will be used by netfslib
to ask cifs to set up and perform operations. Of particular note are:

 (*) cifs_clamp_length() - This is used to negotiate the size of the next
     subrequest in a read request, taking into account the credit available
     and the rsize. The credits are attached to the subrequest.

 (*) cifs_req_issue_read() - This is used to issue a subrequest that has
     been set up and clamped.

 (*) cifs_prepare_write() - This prepares to fill a subrequest by picking a
     channel, reopening the file and requesting credits so that we can set
     the maximum size of the subrequest and also sets the maximum number of
     segments if we're doing RDMA.

 (*) cifs_issue_write() - This releases any unneeded credits and issues an
     asynchronous data write for the contiguous slice of file covered by
     the subrequest. This should possibly be folded in to all
     ->async_writev() ops and that called directly.

 (*) cifs_begin_writeback() - This gets the cached writable handle through
     which we do writeback (this does not affect writethrough, unbuffered
     or direct writes).

At this point, cifs is not wired up to actually *use* netfslib; that will
be done in a subsequent patch.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Steve French <sfrench@samba.org>
cc: Shyam Prasad N <nspmangalore@gmail.com>
cc: Rohith Surabattula <rohiths.msft@gmail.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: linux-cifs@vger.kernel.org
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org
parent c20c0d7325
commit 69c3c023af
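For orientation: this patch only adds the hooks; the later cutover patch is what points the cifs address_space operations at the netfslib helpers so that the cifs_req_ops table added below actually gets driven. A rough sketch of that wiring, under the assumption of the standard netfslib pattern, is shown here. The *_example name is hypothetical; netfs_read_folio(), netfs_readahead(), netfs_writepages(), netfs_dirty_folio(), netfs_release_folio() and netfs_invalidate_folio() are the generic netfs helpers.

/*
 * Illustrative sketch only (not part of this patch): once cifs is wired up,
 * the address_space ops call into netfslib, which in turn drives the
 * cifs_req_ops hooks implemented below.
 */
#include <linux/fs.h>
#include <linux/netfs.h>

static const struct address_space_operations cifs_netfs_aops_example = {
        .read_folio       = netfs_read_folio,      /* -> clamp_length + issue_read */
        .readahead        = netfs_readahead,       /* -> expand_readahead first */
        .writepages       = netfs_writepages,      /* -> begin_writeback, prepare_write, issue_write */
        .dirty_folio      = netfs_dirty_folio,
        .release_folio    = netfs_release_folio,
        .invalidate_folio = netfs_invalidate_folio,
};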
@@ -405,6 +405,9 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
        } while (iov_iter_count(iter));

out:
        if (likely(written) && ctx->ops->post_modify)
                ctx->ops->post_modify(inode);

        if (unlikely(wreq)) {
                ret2 = netfs_end_writethrough(wreq, &wbc, writethrough);
                wbc_detach_inode(&wbc);
@@ -521,6 +524,7 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
        struct folio *folio = page_folio(vmf->page);
        struct file *file = vmf->vma->vm_file;
        struct inode *inode = file_inode(file);
        struct netfs_inode *ictx = netfs_inode(inode);
        vm_fault_t ret = VM_FAULT_RETRY;
        int err;

@@ -567,6 +571,8 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
        trace_netfs_folio(folio, netfs_folio_trace_mkwrite);
        netfs_set_group(folio, netfs_group);
        file_update_time(file);
        if (ictx->ops->post_modify)
                ictx->ops->post_modify(inode);
        ret = VM_FAULT_LOCKED;
out:
        sb_end_pagefault(inode->i_sb);

@@ -2,6 +2,7 @@
config CIFS
        tristate "SMB3 and CIFS support (advanced network filesystem)"
        depends on INET
        select NETFS_SUPPORT
        select NLS
        select NLS_UCS2_UTILS
        select CRYPTO

@@ -1758,7 +1758,7 @@ static int cifs_init_netfs(void)
{
        cifs_io_request_cachep =
                kmem_cache_create("cifs_io_request",
                                  sizeof(struct netfs_io_request), 0,
                                  sizeof(struct cifs_io_request), 0,
                                  SLAB_HWCACHE_ALIGN, NULL);
        if (!cifs_io_request_cachep)
                goto nomem_req;

@@ -84,6 +84,7 @@ extern const struct inode_operations cifs_namespace_inode_operations;


/* Functions related to files and directories */
extern const struct netfs_request_ops cifs_req_ops;
extern const struct file_operations cifs_file_ops;
extern const struct file_operations cifs_file_direct_ops; /* if directio mnt */
extern const struct file_operations cifs_file_strict_ops; /* if strictio mnt */

@@ -1491,15 +1491,24 @@ struct cifs_aio_ctx {
        bool direct_io;
};

struct cifs_io_request {
        struct netfs_io_request rreq;
        struct cifsFileInfo *cfile;
};

/* asynchronous read support */
struct cifs_io_subrequest {
        struct netfs_io_subrequest subreq;
        struct cifsFileInfo *cfile;
        struct address_space *mapping;
        struct cifs_aio_ctx *ctx;
        union {
                struct netfs_io_subrequest subreq;
                struct netfs_io_request *rreq;
                struct cifs_io_request *req;
        };
        ssize_t got_bytes;
        pid_t pid;
        unsigned int xid;
        int result;
        bool have_xid;
        bool replay;
        struct kvec iov[2];
        struct TCP_Server_Info *server;
#ifdef CONFIG_CIFS_SMB_DIRECT
@@ -1507,15 +1516,16 @@ struct cifs_io_subrequest {
#endif
        struct cifs_credits credits;

        enum writeback_sync_modes sync_mode;
        bool uncached;
        bool replay;
        struct bio_vec *bv;

        // TODO: Remove following elements
        struct list_head list;
        struct completion done;
        struct work_struct work;
        struct cifsFileInfo *cfile;
        struct address_space *mapping;
        struct cifs_aio_ctx *ctx;
        enum writeback_sync_modes sync_mode;
        bool uncached;
        struct bio_vec *bv;
};

/*
@@ -36,6 +36,321 @@
#include "fs_context.h"
#include "cifs_ioctl.h"
#include "cached_dir.h"
#include <trace/events/netfs.h>

static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);

/*
 * Prepare a subrequest to upload to the server. We need to allocate credits
 * so that we know the maximum amount of data that we can include in it.
 */
static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
{
        struct cifs_io_subrequest *wdata =
                container_of(subreq, struct cifs_io_subrequest, subreq);
        struct cifs_io_request *req = wdata->req;
        struct TCP_Server_Info *server;
        struct cifsFileInfo *open_file = req->cfile;
        size_t wsize = req->rreq.wsize;
        int rc;

        if (!wdata->have_xid) {
                wdata->xid = get_xid();
                wdata->have_xid = true;
        }

        server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
        wdata->server = server;

retry:
        if (open_file->invalidHandle) {
                rc = cifs_reopen_file(open_file, false);
                if (rc < 0) {
                        if (rc == -EAGAIN)
                                goto retry;
                        subreq->error = rc;
                        return netfs_prepare_write_failed(subreq);
                }
        }

        rc = server->ops->wait_mtu_credits(server, wsize, &wdata->subreq.max_len,
                                           &wdata->credits);
        if (rc < 0) {
                subreq->error = rc;
                return netfs_prepare_write_failed(subreq);
        }

#ifdef CONFIG_CIFS_SMB_DIRECT
        if (server->smbd_conn)
                subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
#endif
}

/*
 * Issue a subrequest to upload to the server.
 */
static void cifs_issue_write(struct netfs_io_subrequest *subreq)
{
        struct cifs_io_subrequest *wdata =
                container_of(subreq, struct cifs_io_subrequest, subreq);
        struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
        int rc;

        if (cifs_forced_shutdown(sbi)) {
                rc = -EIO;
                goto fail;
        }

        rc = adjust_credits(wdata->server, &wdata->credits, wdata->subreq.len);
        if (rc)
                goto fail;

        rc = -EAGAIN;
        if (wdata->req->cfile->invalidHandle)
                goto fail;

        wdata->server->ops->async_writev(wdata);
out:
        return;

fail:
        if (rc == -EAGAIN)
                trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
        else
                trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
        add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
        netfs_write_subrequest_terminated(wdata, rc, false);
        goto out;
}

/*
 * Split the read up according to how many credits we can get for each piece.
 * It's okay to sleep here if we need to wait for more credit to become
 * available.
 *
 * We also choose the server and allocate an operation ID to be cleaned up
 * later.
 */
static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
{
        struct netfs_io_request *rreq = subreq->rreq;
        struct TCP_Server_Info *server;
        struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
        struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
        struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
        size_t rsize = 0;
        int rc;

        rdata->xid = get_xid();
        rdata->have_xid = true;

        server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
        rdata->server = server;

        if (cifs_sb->ctx->rsize == 0)
                cifs_sb->ctx->rsize =
                        server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink),
                                                     cifs_sb->ctx);


        rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, &rsize,
                                           &rdata->credits);
        if (rc) {
                subreq->error = rc;
                return false;
        }

        subreq->len = min_t(size_t, subreq->len, rsize);
#ifdef CONFIG_CIFS_SMB_DIRECT
        if (server->smbd_conn)
                subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
#endif
        return true;
}

/*
 * Issue a read operation on behalf of the netfs helper functions. We're asked
 * to make a read of a certain size at a point in the file. We are permitted
 * to only read a portion of that, but as long as we read something, the netfs
 * helper will call us again so that we can issue another read.
 */
static void cifs_req_issue_read(struct netfs_io_subrequest *subreq)
{
        struct netfs_io_request *rreq = subreq->rreq;
        struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
        struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
        struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
        pid_t pid;
        int rc = 0;

        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
                pid = req->cfile->pid;
        else
                pid = current->tgid; // Ummm... This may be a workqueue

        cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
                 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
                 subreq->transferred, subreq->len);

        if (req->cfile->invalidHandle) {
                do {
                        rc = cifs_reopen_file(req->cfile, true);
                } while (rc == -EAGAIN);
                if (rc)
                        goto out;
        }

        __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
        rdata->pid = pid;

        rc = adjust_credits(rdata->server, &rdata->credits, rdata->subreq.len);
        if (!rc) {
                if (rdata->req->cfile->invalidHandle)
                        rc = -EAGAIN;
                else
                        rc = rdata->server->ops->async_readv(rdata);
        }

out:
        if (rc)
                netfs_subreq_terminated(subreq, rc, false);
}

/*
 * Writeback calls this when it finds a folio that needs uploading. This isn't
 * called if writeback only has copy-to-cache to deal with.
 */
static void cifs_begin_writeback(struct netfs_io_request *wreq)
{
        struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
        int ret;

        ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
        if (ret) {
                cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
                return;
        }

        wreq->io_streams[0].avail = true;
}

/*
 * Initialise a request.
 */
static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
{
        struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
        struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
        struct cifsFileInfo *open_file = NULL;

        rreq->rsize = cifs_sb->ctx->rsize;
        rreq->wsize = cifs_sb->ctx->wsize;

        if (file) {
                open_file = file->private_data;
                rreq->netfs_priv = file->private_data;
                req->cfile = cifsFileInfo_get(open_file);
        } else if (rreq->origin != NETFS_WRITEBACK) {
                WARN_ON_ONCE(1);
                return -EIO;
        }

        return 0;
}

/*
 * Expand the size of a readahead to the size of the rsize, if at least as
 * large as a page, allowing for the possibility that rsize is not pow-2
 * aligned.
 */
static void cifs_expand_readahead(struct netfs_io_request *rreq)
{
        unsigned int rsize = rreq->rsize;
        loff_t misalignment, i_size = i_size_read(rreq->inode);

        if (rsize < PAGE_SIZE)
                return;

        if (rsize < INT_MAX)
                rsize = roundup_pow_of_two(rsize);
        else
                rsize = ((unsigned int)INT_MAX + 1) / 2;

        misalignment = rreq->start & (rsize - 1);
        if (misalignment) {
                rreq->start -= misalignment;
                rreq->len += misalignment;
        }

        rreq->len = round_up(rreq->len, rsize);
        if (rreq->start < i_size && rreq->len > i_size - rreq->start)
                rreq->len = i_size - rreq->start;
}

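To make the rounding in cifs_expand_readahead() concrete, here is a worked example with hypothetical values (not taken from the patch):

/*
 * Worked example (hypothetical values):
 *   rreq->rsize = 0x19000 (100 KiB)  -> roundup_pow_of_two() -> 0x20000
 *   rreq->start = 0x31000, rreq->len = 0x3000
 *   misalignment = 0x31000 & (0x20000 - 1) = 0x11000
 *   rreq->start -> 0x20000, rreq->len -> 0x14000
 *   round_up(0x14000, 0x20000) -> 0x20000
 * i.e. the readahead becomes one aligned 128 KiB slice, which is then
 * trimmed so that it does not run past i_size.
 */
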
/*
 * Completion of a request operation.
 */
static void cifs_rreq_done(struct netfs_io_request *rreq)
{
        struct timespec64 atime, mtime;
        struct inode *inode = rreq->inode;

        /* we do not want atime to be less than mtime, it broke some apps */
        atime = inode_set_atime_to_ts(inode, current_time(inode));
        mtime = inode_get_mtime(inode);
        if (timespec64_compare(&atime, &mtime))
                inode_set_atime_to_ts(inode, inode_get_mtime(inode));
}

static void cifs_post_modify(struct inode *inode)
{
        /* Indication to update ctime and mtime as close is deferred */
        set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
}

static void cifs_free_request(struct netfs_io_request *rreq)
{
        struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);

        if (req->cfile)
                cifsFileInfo_put(req->cfile);
}

static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
{
        struct cifs_io_subrequest *rdata =
                container_of(subreq, struct cifs_io_subrequest, subreq);
        int rc = subreq->error;

        if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
#ifdef CONFIG_CIFS_SMB_DIRECT
                if (rdata->mr) {
                        smbd_deregister_mr(rdata->mr);
                        rdata->mr = NULL;
                }
#endif
        }

        add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
        if (rdata->have_xid)
                free_xid(rdata->xid);
}

const struct netfs_request_ops cifs_req_ops = {
        .request_pool           = &cifs_io_request_pool,
        .subrequest_pool        = &cifs_io_subrequest_pool,
        .init_request           = cifs_init_request,
        .free_request           = cifs_free_request,
        .free_subrequest        = cifs_free_subrequest,
        .expand_readahead       = cifs_expand_readahead,
        .clamp_length           = cifs_clamp_length,
        .issue_read             = cifs_req_issue_read,
        .done                   = cifs_rreq_done,
        .post_modify            = cifs_post_modify,
        .begin_writeback        = cifs_begin_writeback,
        .prepare_write          = cifs_prepare_write,
        .issue_write            = cifs_issue_write,
};

/*
 * Remove the dirty flags from a span of pages.
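Pulling the table above together, the order in which netfslib is expected to invoke these hooks, per the descriptions in the commit message and the code above, is roughly the following (orientation only, not netfslib code):

/*
 * Buffered read (schematic):
 *   ->init_request()       take a ref on the cfile, set rsize/wsize
 *   ->expand_readahead()   widen the window to rsize granularity
 *   per subrequest:
 *     ->clamp_length()     pick a channel, get credits, trim subreq->len
 *     ->issue_read()       send the async SMB read; completion ends in
 *                          netfs_subreq_terminated()
 *   ->done()               keep atime from falling behind mtime
 *   ->free_subrequest() / ->free_request()
 *                          return credits, free the xid, put the cfile
 *
 * Buffered writeback (schematic):
 *   ->begin_writeback()    find a writable cached handle
 *   per subrequest:
 *     ->prepare_write()    pick a channel, reopen if needed, get credits
 *     ->issue_write()      trim credits, send the async SMB write;
 *                          completion ends in
 *                          netfs_write_subrequest_terminated()
 */
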
@@ -302,6 +302,7 @@ struct netfs_request_ops {

        /* Modification handling */
        void (*update_i_size)(struct inode *inode, loff_t i_size);
        void (*post_modify)(struct inode *inode);

        /* Write request handling */
        void (*begin_writeback)(struct netfs_io_request *wreq);

@@ -112,6 +112,7 @@
#define netfs_sreq_ref_traces \
        EM(netfs_sreq_trace_get_copy_to_cache, "GET COPY2C ") \
        EM(netfs_sreq_trace_get_resubmit, "GET RESUBMIT") \
        EM(netfs_sreq_trace_get_submit, "GET SUBMIT") \
        EM(netfs_sreq_trace_get_short_read, "GET SHORTRD") \
        EM(netfs_sreq_trace_new, "NEW ") \
        EM(netfs_sreq_trace_put_cancel, "PUT CANCEL ") \