netfs: Change ->init_request() to return an error code
Change the request initialisation function to return an error code so that
the network filesystem can return a failure (ENOMEM, for example). This will
also allow ceph to abort a ->readahead() op from within the netfslib
framework if the server refuses to give it a cap allowing local caching
(errors aren't passed back through ->readahead(), so returning, say,
-ENOBUFS will cause the op to be aborted).

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: linux-cachefs@redhat.com
Link: https://lore.kernel.org/r/164678212401.1200972.16537041523832944934.stgit@warthog.procyon.org.uk/ # v2
Link: https://lore.kernel.org/r/164692905398.2099075.5238033621684646524.stgit@warthog.procyon.org.uk/ # v3
commit 2de1604173
parent 663dfb65c3
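To illustrate the new contract, here is a minimal sketch of a filesystem-side
->init_request() under the int-returning signature. The my_fs_* names and the
cache-cap check are hypothetical and not part of this patch; the -ENOBUFS
behaviour is the abort path described in the message above.

/* Hypothetical filesystem: sketch of an int-returning ->init_request(). */
static int my_fs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct my_fs_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;	/* Propagated out of netfs_alloc_request() */

	/* A ceph-like fs could veto caching here; since ->readahead()
	 * can't return an error, -ENOBUFS makes netfslib abort the op.
	 */
	if (rreq->origin == NETFS_READAHEAD && !my_fs_have_cache_cap(file)) {
		kfree(ctx);
		return -ENOBUFS;
	}

	rreq->netfs_priv = ctx;
	return 0;
}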
@@ -56,12 +56,13 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
  * @rreq: The read request
  * @file: The file being read from
  */
-static void v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
+static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
 {
 	struct p9_fid *fid = file->private_data;
 
 	refcount_inc(&fid->count);
 	rreq->netfs_priv = fid;
+	return 0;
 }
 
 /**
@@ -359,9 +359,10 @@ static int afs_symlink_readpage(struct file *file, struct page *page)
 	return ret;
 }
 
-static void afs_init_request(struct netfs_io_request *rreq, struct file *file)
+static int afs_init_request(struct netfs_io_request *rreq, struct file *file)
 {
 	rreq->netfs_priv = key_get(afs_file_key(file));
+	return 0;
 }
 
 static bool afs_is_cache_enabled(struct inode *inode)
@@ -20,27 +20,34 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
 {
 	static atomic_t debug_ids;
 	struct netfs_io_request *rreq;
+	int ret;
 
 	rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
-	if (rreq) {
-		rreq->start = start;
-		rreq->len = len;
-		rreq->origin = origin;
-		rreq->netfs_ops = ops;
-		rreq->netfs_priv = netfs_priv;
-		rreq->mapping = mapping;
-		rreq->inode = file_inode(file);
-		rreq->i_size = i_size_read(rreq->inode);
-		rreq->debug_id = atomic_inc_return(&debug_ids);
-		INIT_LIST_HEAD(&rreq->subrequests);
-		INIT_WORK(&rreq->work, netfs_rreq_work);
-		refcount_set(&rreq->ref, 1);
-		__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
-		if (ops->init_request)
-			ops->init_request(rreq, file);
-		netfs_stat(&netfs_n_rh_rreq);
-	}
+	if (!rreq)
+		return ERR_PTR(-ENOMEM);
 
+	rreq->start = start;
+	rreq->len = len;
+	rreq->origin = origin;
+	rreq->netfs_ops = ops;
+	rreq->netfs_priv = netfs_priv;
+	rreq->mapping = mapping;
+	rreq->inode = file_inode(file);
+	rreq->i_size = i_size_read(rreq->inode);
+	rreq->debug_id = atomic_inc_return(&debug_ids);
+	INIT_LIST_HEAD(&rreq->subrequests);
+	INIT_WORK(&rreq->work, netfs_rreq_work);
+	refcount_set(&rreq->ref, 1);
+	__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
+	if (rreq->netfs_ops->init_request) {
+		ret = rreq->netfs_ops->init_request(rreq, file);
+		if (ret < 0) {
+			kfree(rreq);
+			return ERR_PTR(ret);
+		}
+	}
+
+	netfs_stat(&netfs_n_rh_rreq);
 	return rreq;
 }
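A note on the pattern above: netfs_alloc_request() now reports failure
through the kernel's ERR_PTR convention rather than by returning NULL, so
callers test with IS_ERR() and unwrap with PTR_ERR() (both from
<linux/err.h>). A minimal caller-side sketch (variables assumed in scope;
not a line from this patch):

	rreq = netfs_alloc_request(mapping, file, ops, netfs_priv,
				   start, len, NETFS_READ_FOR_WRITE);
	if (IS_ERR(rreq))
		return PTR_ERR(rreq);	/* -ENOMEM or the ->init_request() error */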
@@ -768,7 +768,7 @@ void netfs_readahead(struct readahead_control *ractl,
 				   readahead_pos(ractl),
 				   readahead_length(ractl),
 				   NETFS_READAHEAD);
-	if (!rreq)
+	if (IS_ERR(rreq))
 		goto cleanup;
 
 	if (ops->begin_cache_operation) {
@@ -842,11 +842,9 @@ int netfs_readpage(struct file *file,
 	rreq = netfs_alloc_request(folio->mapping, file, ops, netfs_priv,
 				   folio_file_pos(folio), folio_size(folio),
 				   NETFS_READPAGE);
-	if (!rreq) {
-		if (netfs_priv)
-			ops->cleanup(folio_file_mapping(folio), netfs_priv);
-		folio_unlock(folio);
-		return -ENOMEM;
+	if (IS_ERR(rreq)) {
+		ret = PTR_ERR(rreq);
+		goto alloc_error;
 	}
 
 	if (ops->begin_cache_operation) {
@@ -887,6 +885,11 @@ int netfs_readpage(struct file *file,
 out:
 	netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
 	return ret;
+alloc_error:
+	if (netfs_priv)
+		ops->cleanup(folio_file_mapping(folio), netfs_priv);
+	folio_unlock(folio);
+	return ret;
 }
 EXPORT_SYMBOL(netfs_readpage);
 
@@ -1007,12 +1010,13 @@ retry:
 		goto have_folio_no_wait;
 	}
 
-	ret = -ENOMEM;
 	rreq = netfs_alloc_request(mapping, file, ops, netfs_priv,
 				   folio_file_pos(folio), folio_size(folio),
 				   NETFS_READ_FOR_WRITE);
-	if (!rreq)
+	if (IS_ERR(rreq)) {
+		ret = PTR_ERR(rreq);
 		goto error;
+	}
 	rreq->no_unlock_folio = folio_index(folio);
 	__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
 	netfs_priv = NULL;
@@ -193,7 +193,7 @@ struct netfs_io_request {
  */
 struct netfs_request_ops {
 	bool (*is_cache_enabled)(struct inode *inode);
-	void (*init_request)(struct netfs_io_request *rreq, struct file *file);
+	int (*init_request)(struct netfs_io_request *rreq, struct file *file);
 	int (*begin_cache_operation)(struct netfs_io_request *rreq);
 	void (*expand_readahead)(struct netfs_io_request *rreq);
 	bool (*clamp_length)(struct netfs_io_subrequest *subreq);
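With the signature change in netfs_request_ops, each filesystem's ops table
supplies the int-returning initialiser. A hypothetical table wiring up the
my_fs_init_request() sketch from above (only the field relevant here shown;
designated initialisers leave the rest NULL):

static const struct netfs_request_ops my_fs_req_ops = {
	.init_request	= my_fs_init_request,	/* may now fail the request */
};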