block: trace completion of all bios.
Currently only dm and md/raid5 bios trigger trace_block_bio_complete().
Now that we have bio_chain() and bio_inc_remaining(), it is not
possible, in general, for a driver to know when a bio is really
complete.  Only bio_endio() knows that.

So move the trace_block_bio_complete() call to bio_endio().

Now trace_block_bio_complete() pairs with trace_block_bio_queue(): any
bio for which a 'queue' event is traced will subsequently generate a
'complete' event.

There are a few cases where completion tracing is not wanted.

1/ If blk_update_request() has already generated a completion trace
   event at the 'request' level, there is no point generating one at
   the bio level too.  In this case the bi_sector and bi_size will
   have changed, so the bio-level event would be wrong.

2/ If the bio hasn't actually been queued yet, but is being aborted
   early, then a trace event could be confusing.  Some filesystems
   call bio_endio() but do not want tracing.

3/ The bio_integrity code interposes itself by replacing bi_end_io,
   then restoring it and calling bio_endio() again.  This would
   produce two identical trace events if left like that.

To handle these, we introduce a flag BIO_TRACE_COMPLETION and only
produce the trace event when it is set.
We address point 1 by clearing the flag in blk_update_request().
We address point 2 by only setting the flag when
generic_make_request() is called.
We address point 3 by clearing the flag after generating a completion
event.

When bio_split() is used on a bio, particularly in blk_queue_split(),
there is an extra complication.  A new bio is split off the front and
may be handled directly, without going through
generic_make_request().  The old bio, which has been advanced, is
passed to generic_make_request(), so it would trigger a trace event a
second time.

Probably the best result when a split happens is to see a single
'queue' event for the whole bio, then multiple 'complete' events -
one for each component.  To achieve this we can:
- copy the BIO_TRACE_COMPLETION flag to the new bio in bio_split();
- avoid generating a 'queue' event if BIO_TRACE_COMPLETION is already
  set.
This way, the split-off bio won't create a 'queue' event; the
original won't either, even if it is re-submitted to
generic_make_request(); but both will produce completion events, each
for their own range.

So if generic_make_request() is called (which generates a QUEUED
event), then bio_endio() will create a single COMPLETE event for each
range that the bio is split into, unless the driver has explicitly
requested it not to.

Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
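For context, here is a minimal sketch, using hypothetical driver code
that is not part of this patch, of why chaining hides the true
completion point from everyone except bio_endio():

/* A driver issues extra work as a child bio chained to the parent.
 * bio_chain() bumps the parent's __bi_remaining count, so the
 * parent's ->bi_end_io runs only when the last piece finishes;
 * neither submission site can know which completion is the final
 * one.  Only bio_endio(), which sees the count drop to zero, can.
 */
static void submit_with_child(struct bio *parent, struct bio *child)
{
	bio_chain(child, parent);	/* parent completion waits on child */
	generic_make_request(child);
	generic_make_request(parent);
}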
parent dbde775cdb
commit fbbaf700e7
block/bio.c (+14):

@@ -1826,6 +1826,11 @@ static inline bool bio_remaining_done(struct bio *bio)
  * bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
  * way to end I/O on a bio. No one should call bi_end_io() directly on a
  * bio unless they own it and thus know that it has an end_io function.
+ *
+ * bio_endio() can be called several times on a bio that has been chained
+ * using bio_chain().  The ->bi_end_io() function will only be called the
+ * last time.  At this point the BLK_TA_COMPLETE tracing event will be
+ * generated if BIO_TRACE_COMPLETION is set.
  **/
 void bio_endio(struct bio *bio)
 {
@@ -1846,6 +1851,12 @@ again:
 		goto again;
 	}
 
+	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
+		trace_block_bio_complete(bdev_get_queue(bio->bi_bdev),
+					 bio, bio->bi_error);
+		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
+	}
+
 	blk_throtl_bio_endio(bio);
 	if (bio->bi_end_io)
 		bio->bi_end_io(bio);
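Clearing the flag right after the trace is what defuses point 3
above.  As a hedged sketch of the interposition pattern (hypothetical
names; the real logic lives in the bio_integrity code), the second
pass through bio_endio() finds the flag already cleared:

/* Hypothetical interposition handler sketching the bio_integrity
 * pattern: save the original ->bi_end_io, substitute our own, and
 * re-invoke bio_endio() once the extra work is done.
 */
static bio_end_io_t *saved_end_io;	/* hypothetical; the real code keeps
					 * this in the integrity payload */

static void interposed_end_io(struct bio *bio)
{
	/* ... verify or post-process the completed bio here ... */
	bio->bi_end_io = saved_end_io;	/* restore the original handler */
	bio_endio(bio);			/* BIO_TRACE_COMPLETION was cleared on
					 * the first pass, so this second pass
					 * emits no duplicate COMPLETE event */
}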
@@ -1885,6 +1896,9 @@ struct bio *bio_split(struct bio *bio, int sectors,
 
 	bio_advance(bio, split->bi_iter.bi_size);
 
+	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
+		bio_set_flag(split, BIO_TRACE_COMPLETION);
+
 	return split;
 }
 EXPORT_SYMBOL(bio_split);
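That copy is what makes the blk_queue_split() scenario from the
commit message come out right.  A condensed sketch of that call-site
pattern (max_sectors and bs stand in for the real queue limit and
bio_set):

/* Split the front off an oversized bio, chain it, and resubmit the
 * advanced remainder.  bio_split() copies BIO_TRACE_COMPLETION to
 * 'split', and because the remainder still has the flag set,
 * generic_make_request_checks() skips a second 'queue' event for it.
 * Each piece then emits its own COMPLETE event from bio_endio(),
 * covering its own range.
 */
struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, bs);

if (split) {
	bio_chain(split, bio);		/* remainder's endio waits on the front */
	generic_make_request(bio);	/* resubmit remainder: no new QUEUE event */
	bio = split;			/* caller carries on with the front piece */
}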
block/blk-core.c:

@@ -1957,7 +1957,13 @@ generic_make_request_checks(bio)
 	if (!blkcg_bio_issue_check(q, bio))
 		return false;
 
-	trace_block_bio_queue(q, bio);
+	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
+		trace_block_bio_queue(q, bio);
+		/* Now that enqueuing has been traced, we need to trace
+		 * completion as well.
+		 */
+		bio_set_flag(bio, BIO_TRACE_COMPLETION);
+	}
 	return true;
 
 not_supported:
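Point 2 from the commit message follows directly: a bio that fails
before submission never passes through this code, so the flag is
never set.  A hedged sketch, with a hypothetical helper and the
4.11-era ->bi_error field:

/* Fail a bio that was never submitted.  generic_make_request() never
 * ran, so BIO_TRACE_COMPLETION was never set, and bio_endio() emits
 * no misleading COMPLETE event for an I/O that had no QUEUE event.
 */
static void fail_bio_early(struct bio *bio)
{
	bio->bi_error = -EIO;
	bio_endio(bio);
}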
@@ -2622,6 +2628,8 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 		if (bio_bytes == bio->bi_iter.bi_size)
 			req->bio = bio->bi_next;
 
+		/* Completion has already been traced */
+		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
 		req_bio_endio(req, bio, bio_bytes, error);
 
 		total_bytes += bio_bytes;
drivers/md/dm.c:

@@ -810,7 +810,6 @@ static void dec_pending(struct dm_io *io, int error)
 			queue_io(md, bio);
 		} else {
 			/* done with normal IO or empty flush */
-			trace_block_bio_complete(md->queue, bio, io_error);
 			bio->bi_error = io_error;
 			bio_endio(bio);
 		}
drivers/md/raid5.c:

@@ -5031,8 +5031,6 @@ static void raid5_align_endio(struct bio *bi)
 	rdev_dec_pending(rdev, conf->mddev);
 
 	if (!error) {
-		trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
-					 raid_bi, 0);
 		bio_endio(raid_bi);
 		if (atomic_dec_and_test(&conf->active_aligned_reads))
 			wake_up(&conf->wait_for_quiescent);
include/linux/blk_types.h:

@@ -110,6 +110,8 @@ struct bio {
 #define BIO_REFFED	8	/* bio has elevated ->bi_cnt */
 #define BIO_THROTTLED	9	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
+#define BIO_TRACE_COMPLETION 10	/* bio_endio() should trace the final completion
+				 * of this bio. */
 /* See BVEC_POOL_OFFSET below before adding new flags */
 
 /*
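Since the behaviour keys entirely off this flag, a driver that wants
to suppress the bio-level event (the "unless the driver has
explicitly requested it not to" case) can simply clear it before
completing the bio.  A hedged sketch with a hypothetical helper:

/* Complete a bio without a bio-level trace event, e.g. when the
 * driver has already accounted for this completion at another level.
 */
static void complete_untraced(struct bio *bio, int error)
{
	bio->bi_error = error;
	bio_clear_flag(bio, BIO_TRACE_COMPLETION);	/* opt out of tracing */
	bio_endio(bio);
}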