netfs: Cut over to using new writeback code
Cut over to using the new writeback code. The old code is #ifdef'd out or
otherwise removed from compilation to avoid conflicts and will be removed
in a future patch.

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: Eric Van Hensbergen <ericvh@kernel.org>
cc: Latchesar Ionkov <lucho@ionkov.net>
cc: Dominique Martinet <asmadeus@codewreck.org>
cc: Christian Schoenebeck <linux_oss@crudebyte.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: v9fs@lists.linux.dev
cc: linux-afs@lists.infradead.org
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
This commit is contained in:
parent 64e64e6c18
commit 2df86547b2
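For context, this is the shape a filesystem's hook table takes once it is cut over, as the v9fs and afs hunks below show. A hedged sketch, not code from this patch: the myfs_* symbols are hypothetical stand-ins, and only hooks that appear in struct netfs_request_ops in this diff are used.

	/*
	 * Sketch only: a cut-over filesystem supplies issue_write and
	 * begin_writeback, and no longer implements create_write_requests,
	 * which this commit removes from struct netfs_request_ops.
	 * All myfs_* symbols are hypothetical.
	 */
	const struct netfs_request_ops myfs_req_ops = {
		.init_request		= myfs_init_request,
		.issue_read		= myfs_issue_read,
		.begin_writeback	= myfs_begin_writeback,	/* start a writeback pass */
		.prepare_write		= myfs_prepare_write,	/* set up one subrequest */
		.issue_write		= myfs_issue_write,	/* send one subrequest */
	};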
fs/9p/vfs_addr.c:
@@ -60,6 +60,7 @@ static void v9fs_issue_write(struct netfs_io_subrequest *subreq)
 	netfs_write_subrequest_terminated(subreq, len ?: err, false);
 }
 
+#if 0 // TODO: Remove
 static void v9fs_upload_to_server(struct netfs_io_subrequest *subreq)
 {
 	struct p9_fid *fid = subreq->rreq->netfs_priv;
@@ -91,6 +92,7 @@ static void v9fs_create_write_requests(struct netfs_io_request *wreq, loff_t sta
 	if (subreq)
 		netfs_queue_write_request(subreq);
 }
+#endif
 
 /**
  * v9fs_issue_read - Issue a read from 9P
@@ -121,18 +123,15 @@ static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
 {
 	struct p9_fid *fid;
 	bool writing = (rreq->origin == NETFS_READ_FOR_WRITE ||
+			rreq->origin == NETFS_WRITEBACK ||
+			rreq->origin == NETFS_WRITETHROUGH ||
 			rreq->origin == NETFS_UNBUFFERED_WRITE ||
 			rreq->origin == NETFS_DIO_WRITE);
 
-#if 0 // TODO: Cut over
-	if (rreq->origin == NETFS_WRITEBACK)
-		return 0; /* We don't get the write handle until we find we
-			   * have actually dirty data and not just
-			   * copy-to-cache data.
-			   */
-#endif
-
 	if (file) {
 		fid = file->private_data;
@@ -179,7 +178,6 @@ const struct netfs_request_ops v9fs_req_ops = {
 	.issue_read		= v9fs_issue_read,
 	.begin_writeback	= v9fs_begin_writeback,
 	.issue_write		= v9fs_issue_write,
-	.create_write_requests	= v9fs_create_write_requests,
 };
 
 const struct address_space_operations v9fs_addr_operations = {
fs/afs/file.c:
@@ -353,7 +353,7 @@ static int afs_init_request(struct netfs_io_request *rreq, struct file *file)
 	if (file)
 		rreq->netfs_priv = key_get(afs_file_key(file));
 	rreq->rsize = 256 * 1024;
-	rreq->wsize = 256 * 1024;
+	rreq->wsize = 256 * 1024 * 1024;
 	return 0;
 }
 
@@ -399,7 +399,6 @@ const struct netfs_request_ops afs_req_ops = {
 	.issue_read		= afs_issue_read,
 	.update_i_size		= afs_update_i_size,
 	.invalidate_cache	= afs_netfs_invalidate_cache,
-	.create_write_requests	= afs_create_write_requests,
 	.begin_writeback	= afs_begin_writeback,
 	.prepare_write		= afs_prepare_write,
 	.issue_write		= afs_issue_write,
fs/afs/internal.h:
@@ -1605,7 +1605,6 @@ extern int afs_writepages(struct address_space *, struct writeback_control *);
 extern int afs_fsync(struct file *, loff_t, loff_t, int);
 extern vm_fault_t afs_page_mkwrite(struct vm_fault *vmf);
 extern void afs_prune_wb_keys(struct afs_vnode *);
-void afs_create_write_requests(struct netfs_io_request *wreq, loff_t start, size_t len);
 
 /*
  * xattr.c
fs/afs/write.c:
@@ -156,6 +156,7 @@ try_next_key:
 	return afs_put_operation(op);
 }
 
+#if 0 // TODO: Remove
 static void afs_upload_to_server(struct netfs_io_subrequest *subreq)
 {
 	struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode);
@@ -193,6 +194,7 @@ void afs_create_write_requests(struct netfs_io_request *wreq, loff_t start, size
 	if (subreq)
 		netfs_queue_write_request(subreq);
 }
+#endif
 
 /*
  * Writeback calls this when it finds a folio that needs uploading.  This isn't
fs/netfs/Makefile:
@@ -11,7 +11,6 @@ netfs-y := \
 	main.o \
 	misc.o \
 	objects.o \
-	output.o \
 	write_collect.o \
 	write_issue.o
 
fs/netfs/buffered_write.c:
@@ -26,8 +26,6 @@ enum netfs_how_to_modify {
 	NETFS_FLUSH_CONTENT,		/* Flush incompatible content. */
 };
 
-static void netfs_cleanup_buffered_write(struct netfs_io_request *wreq);
-
 static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
 {
 	void *priv = folio_get_private(folio);
@@ -180,7 +178,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 	};
 	struct netfs_io_request *wreq = NULL;
 	struct netfs_folio *finfo;
-	struct folio *folio;
+	struct folio *folio, *writethrough = NULL;
 	enum netfs_how_to_modify howto;
 	enum netfs_folio_trace trace;
 	unsigned int bdp_flags = (iocb->ki_flags & IOCB_SYNC) ? 0: BDP_ASYNC;
@@ -209,7 +207,6 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 		}
 		if (!is_sync_kiocb(iocb))
 			wreq->iocb = iocb;
-		wreq->cleanup = netfs_cleanup_buffered_write;
 		netfs_stat(&netfs_n_wh_writethrough);
 	} else {
 		netfs_stat(&netfs_n_wh_buffered_write);
@@ -253,6 +250,16 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 		offset = pos & (flen - 1);
 		part = min_t(size_t, flen - offset, part);
 
+		/* Wait for writeback to complete.  The writeback engine owns
+		 * the info in folio->private and may change it until it
+		 * removes the WB mark.
+		 */
+		if (folio_get_private(folio) &&
+		    folio_wait_writeback_killable(folio)) {
+			ret = written ? -EINTR : -ERESTARTSYS;
+			goto error_folio_unlock;
+		}
+
 		if (signal_pending(current)) {
 			ret = written ? -EINTR : -ERESTARTSYS;
 			goto error_folio_unlock;
@@ -327,6 +334,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 			maybe_trouble = true;
 			iov_iter_revert(iter, copied);
 			copied = 0;
+			folio_unlock(folio);
 			goto retry;
 		}
 		netfs_set_group(folio, netfs_group);
@@ -382,23 +390,14 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 
 		if (likely(!wreq)) {
 			folio_mark_dirty(folio);
+			folio_unlock(folio);
 		} else {
-			if (folio_test_dirty(folio))
-				/* Sigh.  mmap. */
-				folio_clear_dirty_for_io(folio);
-			/* We make multiple writes to the folio... */
-			if (!folio_test_writeback(folio)) {
-				folio_start_writeback(folio);
-				if (wreq->iter.count == 0)
-					trace_netfs_folio(folio, netfs_folio_trace_wthru);
-				else
-					trace_netfs_folio(folio, netfs_folio_trace_wthru_plus);
-			}
-			netfs_advance_writethrough(wreq, copied,
-						   offset + copied == flen);
+			netfs_advance_writethrough(wreq, &wbc, folio, copied,
+						   offset + copied == flen,
+						   &writethrough);
+			/* Folio unlocked */
 		}
 	retry:
-		folio_unlock(folio);
 		folio_put(folio);
 		folio = NULL;
 
@@ -407,7 +406,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 
 out:
 	if (unlikely(wreq)) {
-		ret2 = netfs_end_writethrough(wreq, iocb);
+		ret2 = netfs_end_writethrough(wreq, &wbc, writethrough);
 		wbc_detach_inode(&wbc);
 		if (ret2 == -EIOCBQUEUED)
 			return ret2;
@@ -529,12 +528,14 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
 
 	sb_start_pagefault(inode->i_sb);
 
-	if (folio_wait_writeback_killable(folio))
-		goto out;
-
 	if (folio_lock_killable(folio) < 0)
 		goto out;
 
+	if (folio_wait_writeback_killable(folio)) {
+		ret = VM_FAULT_LOCKED;
+		goto out;
+	}
+
 	/* Can we see a streaming write here? */
 	if (WARN_ON(!folio_test_uptodate(folio))) {
 		ret = VM_FAULT_SIGBUS | VM_FAULT_LOCKED;
@@ -573,6 +574,7 @@ out:
 }
 EXPORT_SYMBOL(netfs_page_mkwrite);
 
+#if 0 // TODO: Remove
 /*
  * Kill all the pages in the given range
  */
@@ -1199,3 +1201,4 @@ out:
 	return ret;
 }
 EXPORT_SYMBOL(netfs_writepages);
+#endif
fs/netfs/direct_write.c:
@@ -34,6 +34,7 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
 	unsigned long long start = iocb->ki_pos;
 	unsigned long long end = start + iov_iter_count(iter);
 	ssize_t ret, n;
+	size_t len = iov_iter_count(iter);
 	bool async = !is_sync_kiocb(iocb);
 
 	_enter("");
@@ -46,13 +47,17 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
 
 	_debug("uw %llx-%llx", start, end);
 
-	wreq = netfs_alloc_request(iocb->ki_filp->f_mapping, iocb->ki_filp,
-				   start, end - start,
-				   iocb->ki_flags & IOCB_DIRECT ?
-				   NETFS_DIO_WRITE : NETFS_UNBUFFERED_WRITE);
+	wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp, start,
+				      iocb->ki_flags & IOCB_DIRECT ?
+				      NETFS_DIO_WRITE : NETFS_UNBUFFERED_WRITE);
 	if (IS_ERR(wreq))
 		return PTR_ERR(wreq);
 
+	wreq->io_streams[0].avail = true;
+	trace_netfs_write(wreq, (iocb->ki_flags & IOCB_DIRECT ?
+				 netfs_write_trace_dio_write :
+				 netfs_write_trace_unbuffered_write));
+
 	{
 		/* If this is an async op and we're not using a bounce buffer,
 		 * we have to save the source buffer as the iterator is only
@@ -63,7 +68,7 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
 		 * request.
 		 */
 		if (async || user_backed_iter(iter)) {
-			n = netfs_extract_user_iter(iter, wreq->len, &wreq->iter, 0);
+			n = netfs_extract_user_iter(iter, len, &wreq->iter, 0);
 			if (n < 0) {
 				ret = n;
 				goto out;
@@ -71,7 +76,6 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
 			wreq->direct_bv = (struct bio_vec *)wreq->iter.bvec;
 			wreq->direct_bv_count = n;
 			wreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
-			wreq->len = iov_iter_count(&wreq->iter);
 		} else {
 			wreq->iter = *iter;
 		}
@@ -79,6 +83,8 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
 		wreq->io_iter = wreq->iter;
 	}
 
+	__set_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags);
+
 	/* Copy the data into the bounce buffer and encrypt it. */
 	// TODO
 
@@ -87,10 +93,7 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
 	if (async)
 		wreq->iocb = iocb;
 	wreq->cleanup = netfs_cleanup_dio_write;
-	ret = netfs_begin_write(wreq, is_sync_kiocb(iocb),
-				iocb->ki_flags & IOCB_DIRECT ?
-				netfs_write_trace_dio_write :
-				netfs_write_trace_unbuffered_write);
+	ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), iov_iter_count(&wreq->io_iter));
 	if (ret < 0) {
 		_debug("begin = %zd", ret);
 		goto out;
@@ -100,9 +103,8 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
 		trace_netfs_rreq(wreq, netfs_rreq_trace_wait_ip);
 		wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS,
 			    TASK_UNINTERRUPTIBLE);
-
+		smp_rmb(); /* Read error/transferred after RIP flag */
 		ret = wreq->error;
 		_debug("waited = %zd", ret);
 		if (ret == 0) {
 			ret = wreq->transferred;
 			iocb->ki_pos += ret;
fs/netfs/internal.h:
@@ -92,15 +92,6 @@ static inline void netfs_see_request(struct netfs_io_request *rreq,
 	trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
 }
 
-/*
- * output.c
- */
-int netfs_begin_write(struct netfs_io_request *wreq, bool may_wait,
-		      enum netfs_write_trace what);
-struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
-int netfs_advance_writethrough(struct netfs_io_request *wreq, size_t copied, bool to_page_end);
-int netfs_end_writethrough(struct netfs_io_request *wreq, struct kiocb *iocb);
-
 /*
  * stats.c
  */
@@ -172,12 +163,12 @@ void netfs_reissue_write(struct netfs_io_stream *stream,
 int netfs_advance_write(struct netfs_io_request *wreq,
 			struct netfs_io_stream *stream,
 			loff_t start, size_t len, bool to_eof);
-struct netfs_io_request *new_netfs_begin_writethrough(struct kiocb *iocb, size_t len);
-int new_netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
-				   struct folio *folio, size_t copied, bool to_page_end,
-				   struct folio **writethrough_cache);
-int new_netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
-			       struct folio *writethrough_cache);
+struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
+int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+			       struct folio *folio, size_t copied, bool to_page_end,
+			       struct folio **writethrough_cache);
+int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+			   struct folio *writethrough_cache);
 int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);
 
 /*
fs/netfs/write_collect.c:
@@ -709,7 +709,7 @@ void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async)
 }
 
 /**
- * new_netfs_write_subrequest_terminated - Note the termination of a write operation.
+ * netfs_write_subrequest_terminated - Note the termination of a write operation.
  * @_op: The I/O request that has terminated.
  * @transferred_or_error: The amount of data transferred or an error code.
  * @was_async: The termination was asynchronous
@@ -731,8 +731,8 @@ void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async)
  * Note that %_op is a void* so that the function can be passed to
  * kiocb::term_func without the need for a casting wrapper.
  */
-void new_netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
-					   bool was_async)
+void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
+				       bool was_async)
 {
 	struct netfs_io_subrequest *subreq = _op;
 	struct netfs_io_request *wreq = subreq->rreq;
@@ -800,4 +800,4 @@ void new_netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_err
 
 	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
 }
-EXPORT_SYMBOL(new_netfs_write_subrequest_terminated);
+EXPORT_SYMBOL(netfs_write_subrequest_terminated);
fs/netfs/write_issue.c:
@@ -494,8 +494,8 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
 /*
  * Write some of the pending data back to the server
  */
-int new_netfs_writepages(struct address_space *mapping,
-			 struct writeback_control *wbc)
+int netfs_writepages(struct address_space *mapping,
+		     struct writeback_control *wbc)
 {
 	struct netfs_inode *ictx = netfs_inode(mapping->host);
 	struct netfs_io_request *wreq = NULL;
@@ -556,12 +556,12 @@ out:
 	_leave(" = %d", error);
 	return error;
 }
-EXPORT_SYMBOL(new_netfs_writepages);
+EXPORT_SYMBOL(netfs_writepages);
 
 /*
  * Begin a write operation for writing through the pagecache.
 */
-struct netfs_io_request *new_netfs_begin_writethrough(struct kiocb *iocb, size_t len)
+struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len)
 {
 	struct netfs_io_request *wreq = NULL;
 	struct netfs_inode *ictx = netfs_inode(file_inode(iocb->ki_filp));
@@ -586,9 +586,9 @@ struct netfs_io_request *new_netfs_begin_writethrough(struct kiocb *iocb, size_t
 * to the request.  If we've added more than wsize then we need to create a new
 * subrequest.
 */
-int new_netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
-				   struct folio *folio, size_t copied, bool to_page_end,
-				   struct folio **writethrough_cache)
+int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+			       struct folio *folio, size_t copied, bool to_page_end,
+			       struct folio **writethrough_cache)
 {
 	_enter("R=%x ic=%zu ws=%u cp=%zu tp=%u",
 	       wreq->debug_id, wreq->iter.count, wreq->wsize, copied, to_page_end);
@@ -618,8 +618,8 @@ int new_netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeba
 /*
 * End a write operation used when writing through the pagecache.
 */
-int new_netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
-			       struct folio *writethrough_cache)
+int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+			   struct folio *writethrough_cache)
 {
 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
 	int ret;
include/linux/netfs.h:
@@ -303,8 +303,6 @@ struct netfs_request_ops {
 	void (*update_i_size)(struct inode *inode, loff_t i_size);
 
 	/* Write request handling */
-	void (*create_write_requests)(struct netfs_io_request *wreq,
-				      loff_t start, size_t len);
 	void (*begin_writeback)(struct netfs_io_request *wreq);
 	void (*prepare_write)(struct netfs_io_subrequest *subreq);
 	void (*issue_write)(struct netfs_io_subrequest *subreq);
@@ -409,8 +407,6 @@ int netfs_write_begin(struct netfs_inode *, struct file *,
 		      struct folio **, void **fsdata);
 int netfs_writepages(struct address_space *mapping,
 		     struct writeback_control *wbc);
-int new_netfs_writepages(struct address_space *mapping,
-			 struct writeback_control *wbc);
 bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio);
 int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc);
 void netfs_clear_inode_writeback(struct inode *inode, const void *aux);
@@ -431,14 +427,9 @@ ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
 				iov_iter_extraction_t extraction_flags);
 size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
 			size_t max_size, size_t max_segs);
-struct netfs_io_subrequest *netfs_create_write_request(
-	struct netfs_io_request *wreq, enum netfs_io_source dest,
-	loff_t start, size_t len, work_func_t worker);
 void netfs_prepare_write_failed(struct netfs_io_subrequest *subreq);
 void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
 				       bool was_async);
-void new_netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
-					   bool was_async);
 void netfs_queue_write_request(struct netfs_io_subrequest *subreq);
 
 int netfs_start_io_read(struct inode *inode);
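Taken together, the writethrough entry points renamed above (declared in fs/netfs/internal.h) give the write-through path its final shape. A rough sketch of the calling sequence, not code from this commit: the myfs_* helper is hypothetical, and the real loop in netfs_perform_write() handles folio locking, retries, and error paths that are elided here.

	/* Hypothetical helper: returns a locked, filled folio; not a real netfs API. */
	extern struct folio *myfs_get_filled_folio(struct kiocb *iocb,
						   struct iov_iter *iter, size_t *copied);

	static ssize_t myfs_writethrough_sketch(struct kiocb *iocb, struct iov_iter *iter,
						struct writeback_control *wbc)
	{
		struct folio *writethrough = NULL;	/* folio currently in flight */
		struct netfs_io_request *wreq;
		size_t copied;

		wreq = netfs_begin_writethrough(iocb, iov_iter_count(iter));
		if (IS_ERR(wreq))
			return PTR_ERR(wreq);

		while (iov_iter_count(iter)) {
			struct folio *folio = myfs_get_filled_folio(iocb, iter, &copied);

			/* Hand the folio to the writeback engine; it unlocks it. */
			netfs_advance_writethrough(wreq, wbc, folio, copied,
						   true /* to_page_end */, &writethrough);
			folio_put(folio);
		}

		/* Flush any final partial folio and collect the result. */
		return netfs_end_writethrough(wreq, wbc, writethrough);
	}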