mirror of
https://github.com/torvalds/linux.git
synced 2024-11-22 12:11:40 +00:00
netfs: Make the refcounting of netfs_begin_read() easier to use
Make the refcounting of netfs_begin_read() easier to use by not eating the caller's ref on the netfs_io_request it's given. This makes it easier to use when we need to look in the request struct after. Signed-off-by: David Howells <dhowells@redhat.com> Reviewed-by: Jeff Layton <jlayton@kernel.org> cc: linux-cachefs@redhat.com cc: linux-fsdevel@vger.kernel.org cc: linux-mm@kvack.org
This commit is contained in:
parent
6ba22d8d15
commit
4fcccc38eb
@@ -210,6 +210,7 @@ void netfs_readahead(struct readahead_control *ractl)
|
||||
;
|
||||
|
||||
netfs_begin_read(rreq, false);
|
||||
netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
|
||||
return;
|
||||
|
||||
cleanup_free:
|
||||
@@ -260,7 +261,9 @@ int netfs_read_folio(struct file *file, struct folio *folio)
|
||||
iov_iter_xarray(&rreq->iter, ITER_DEST, &mapping->i_pages,
|
||||
rreq->start, rreq->len);
|
||||
|
||||
return netfs_begin_read(rreq, true);
|
||||
ret = netfs_begin_read(rreq, true);
|
||||
netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
|
||||
return ret;
|
||||
|
||||
discard:
|
||||
netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
|
||||
@@ -429,6 +432,7 @@ retry:
|
||||
ret = netfs_begin_read(rreq, true);
|
||||
if (ret < 0)
|
||||
goto error;
|
||||
netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
|
||||
|
||||
have_folio:
|
||||
ret = folio_wait_fscache_killable(folio);
|
||||
|
@@ -362,6 +362,7 @@ again:
|
||||
|
||||
netfs_rreq_unlock_folios(rreq);
|
||||
|
||||
trace_netfs_rreq(rreq, netfs_rreq_trace_wake_ip);
|
||||
clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
|
||||
wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
|
||||
|
||||
@@ -657,7 +658,6 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
|
||||
|
||||
if (rreq->len == 0) {
|
||||
pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
|
||||
netfs_put_request(rreq, false, netfs_rreq_trace_put_zero_len);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
@@ -665,12 +665,10 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
|
||||
|
||||
INIT_WORK(&rreq->work, netfs_rreq_work);
|
||||
|
||||
if (sync)
|
||||
netfs_get_request(rreq, netfs_rreq_trace_get_hold);
|
||||
|
||||
/* Chop the read into slices according to what the cache and the netfs
|
||||
* want and submit each one.
|
||||
*/
|
||||
netfs_get_request(rreq, netfs_rreq_trace_get_for_outstanding);
|
||||
atomic_set(&rreq->nr_outstanding, 1);
|
||||
io_iter = rreq->io_iter;
|
||||
do {
|
||||
@@ -680,25 +678,25 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
|
||||
} while (rreq->submitted < rreq->len);
|
||||
|
||||
if (sync) {
|
||||
/* Keep nr_outstanding incremented so that the ref always belongs to
|
||||
* us, and the service code isn't punted off to a random thread pool to
|
||||
* process.
|
||||
/* Keep nr_outstanding incremented so that the ref always
|
||||
* belongs to us, and the service code isn't punted off to a
|
||||
* random thread pool to process. Note that this might start
|
||||
* further work, such as writing to the cache.
|
||||
*/
|
||||
for (;;) {
|
||||
wait_var_event(&rreq->nr_outstanding,
|
||||
atomic_read(&rreq->nr_outstanding) == 1);
|
||||
wait_var_event(&rreq->nr_outstanding,
|
||||
atomic_read(&rreq->nr_outstanding) == 1);
|
||||
if (atomic_dec_and_test(&rreq->nr_outstanding))
|
||||
netfs_rreq_assess(rreq, false);
|
||||
if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
|
||||
break;
|
||||
cond_resched();
|
||||
}
|
||||
|
||||
trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip);
|
||||
wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS,
|
||||
TASK_UNINTERRUPTIBLE);
|
||||
|
||||
ret = rreq->error;
|
||||
if (ret == 0 && rreq->submitted < rreq->len) {
|
||||
trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
|
||||
ret = -EIO;
|
||||
}
|
||||
netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
|
||||
} else {
|
||||
/* If we decrement nr_outstanding to 0, the ref belongs to us. */
|
||||
if (atomic_dec_and_test(&rreq->nr_outstanding))
|
||||
|
@@ -34,7 +34,9 @@
|
||||
EM(netfs_rreq_trace_free, "FREE ") \
|
||||
EM(netfs_rreq_trace_resubmit, "RESUBMT") \
|
||||
EM(netfs_rreq_trace_unlock, "UNLOCK ") \
|
||||
E_(netfs_rreq_trace_unmark, "UNMARK ")
|
||||
EM(netfs_rreq_trace_unmark, "UNMARK ") \
|
||||
EM(netfs_rreq_trace_wait_ip, "WAIT-IP") \
|
||||
E_(netfs_rreq_trace_wake_ip, "WAKE-IP")
|
||||
|
||||
#define netfs_sreq_sources \
|
||||
EM(NETFS_FILL_WITH_ZEROES, "ZERO") \
|
||||
@@ -65,14 +67,13 @@
|
||||
E_(netfs_fail_prepare_write, "prep-write")
|
||||
|
||||
#define netfs_rreq_ref_traces \
|
||||
EM(netfs_rreq_trace_get_hold, "GET HOLD ") \
|
||||
EM(netfs_rreq_trace_get_for_outstanding,"GET OUTSTND") \
|
||||
EM(netfs_rreq_trace_get_subreq, "GET SUBREQ ") \
|
||||
EM(netfs_rreq_trace_put_complete, "PUT COMPLT ") \
|
||||
EM(netfs_rreq_trace_put_discard, "PUT DISCARD") \
|
||||
EM(netfs_rreq_trace_put_failed, "PUT FAILED ") \
|
||||
EM(netfs_rreq_trace_put_hold, "PUT HOLD ") \
|
||||
EM(netfs_rreq_trace_put_return, "PUT RETURN ") \
|
||||
EM(netfs_rreq_trace_put_subreq, "PUT SUBREQ ") \
|
||||
EM(netfs_rreq_trace_put_zero_len, "PUT ZEROLEN") \
|
||||
E_(netfs_rreq_trace_new, "NEW ")
|
||||
|
||||
#define netfs_sreq_ref_traces \
|
||||
|
Loading…
Reference in New Issue
Block a user