block: remove the request_queue to argument request based tracepoints
The request_queue can trivially be derived from the request.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 1c02fca620
commit a54895fa05
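For illustration only, a minimal C sketch of the idea behind the change (the rq_queue() helper is hypothetical and not part of this patch): every struct request already carries a pointer to its queue in rq->q, so the request-based tracepoints can drop the separate struct request_queue * argument and derive the queue internally.

/* Sketch only - assumes <linux/blkdev.h>; rq_queue() is a hypothetical helper. */
#include <linux/blkdev.h>

static inline struct request_queue *rq_queue(struct request *rq)
{
	return rq->q;	/* the request already knows its queue */
}

/* Callers then shrink from trace_block_rq_issue(q, rq) to trace_block_rq_issue(rq). */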
@@ -799,7 +799,7 @@ static struct request *attempt_merge(struct request_queue *q,
 	 */
 	blk_account_io_merge_request(next);

-	trace_block_rq_merge(q, next);
+	trace_block_rq_merge(next);

 	/*
 	 * ownership of bio passed from next to req, return 'next' for
@@ -386,7 +386,7 @@ EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

 void blk_mq_sched_request_inserted(struct request *rq)
 {
-	trace_block_rq_insert(rq->q, rq);
+	trace_block_rq_insert(rq);
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);

@@ -733,7 +733,7 @@ void blk_mq_start_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;

-	trace_block_rq_issue(q, rq);
+	trace_block_rq_issue(rq);

 	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
 		rq->io_start_time_ns = ktime_get_ns();
@@ -760,7 +760,7 @@ static void __blk_mq_requeue_request(struct request *rq)

 	blk_mq_put_driver_tag(rq);

-	trace_block_rq_requeue(q, rq);
+	trace_block_rq_requeue(rq);
 	rq_qos_requeue(q, rq);

 	if (blk_mq_request_started(rq)) {
@@ -1821,7 +1821,7 @@ static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,

 	lockdep_assert_held(&ctx->lock);

-	trace_block_rq_insert(hctx->queue, rq);
+	trace_block_rq_insert(rq);

 	if (at_head)
 		list_add(&rq->queuelist, &ctx->rq_lists[type]);
@@ -1878,7 +1878,7 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 	 */
 	list_for_each_entry(rq, list, queuelist) {
 		BUG_ON(rq->mq_ctx != ctx);
-		trace_block_rq_insert(hctx->queue, rq);
+		trace_block_rq_insert(rq);
 	}

 	spin_lock(&ctx->lock);
@@ -397,7 +397,7 @@ static int map_request(struct dm_rq_target_io *tio)
 	}

 	/* The target has remapped the I/O so dispatch it */
-	trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
+	trace_block_rq_remap(clone, disk_devt(dm_disk(md)),
 			     blk_rq_pos(rq));
 	ret = dm_dispatch_clone_request(clone, rq);
 	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
@@ -2359,8 +2359,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
 		}
 	}

-	blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
-			    sizeof(blktrc));
+	blk_add_driver_data(scsi->request, &blktrc, sizeof(blktrc));
 }

 /**
@@ -75,8 +75,7 @@ static inline bool blk_trace_note_message_enabled(struct request_queue *q)
 	return ret;
 }

-extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
-				void *data, size_t len);
+extern void blk_add_driver_data(struct request *rq, void *data, size_t len);
 extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 			   struct block_device *bdev,
 			   char __user *arg);
@@ -90,7 +89,7 @@ extern struct attribute_group blk_trace_attr_group;
 #else /* !CONFIG_BLK_DEV_IO_TRACE */
 # define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
 # define blk_trace_shutdown(q) do { } while (0)
-# define blk_add_driver_data(q, rq, data, len) do {} while (0)
+# define blk_add_driver_data(rq, data, len) do {} while (0)
 # define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY)
 # define blk_trace_startstop(q, start) (-ENOTTY)
 # define blk_trace_remove(q) (-ENOTTY)
@@ -64,7 +64,6 @@ DEFINE_EVENT(block_buffer, block_dirty_buffer,

 /**
  * block_rq_requeue - place block IO request back on a queue
- * @q: queue holding operation
  * @rq: block IO operation request
  *
  * The block operation request @rq is being placed back into queue
@@ -73,9 +72,9 @@ DEFINE_EVENT(block_buffer, block_dirty_buffer,
  */
 TRACE_EVENT(block_rq_requeue,

-	TP_PROTO(struct request_queue *q, struct request *rq),
+	TP_PROTO(struct request *rq),

-	TP_ARGS(q, rq),
+	TP_ARGS(rq),

 	TP_STRUCT__entry(
 		__field( dev_t, dev )
@@ -147,9 +146,9 @@ TRACE_EVENT(block_rq_complete,

 DECLARE_EVENT_CLASS(block_rq,

-	TP_PROTO(struct request_queue *q, struct request *rq),
+	TP_PROTO(struct request *rq),

-	TP_ARGS(q, rq),
+	TP_ARGS(rq),

 	TP_STRUCT__entry(
 		__field( dev_t, dev )
@@ -181,7 +180,6 @@ DECLARE_EVENT_CLASS(block_rq,

 /**
  * block_rq_insert - insert block operation request into queue
- * @q: target queue
  * @rq: block IO operation request
  *
  * Called immediately before block operation request @rq is inserted
@@ -191,14 +189,13 @@ DECLARE_EVENT_CLASS(block_rq,
  */
 DEFINE_EVENT(block_rq, block_rq_insert,

-	TP_PROTO(struct request_queue *q, struct request *rq),
+	TP_PROTO(struct request *rq),

-	TP_ARGS(q, rq)
+	TP_ARGS(rq)
 );

 /**
  * block_rq_issue - issue pending block IO request operation to device driver
- * @q: queue holding operation
  * @rq: block IO operation operation request
  *
  * Called when block operation request @rq from queue @q is sent to a
@@ -206,14 +203,13 @@ DEFINE_EVENT(block_rq, block_rq_insert,
  */
 DEFINE_EVENT(block_rq, block_rq_issue,

-	TP_PROTO(struct request_queue *q, struct request *rq),
+	TP_PROTO(struct request *rq),

-	TP_ARGS(q, rq)
+	TP_ARGS(rq)
 );

 /**
  * block_rq_merge - merge request with another one in the elevator
- * @q: queue holding operation
  * @rq: block IO operation operation request
  *
  * Called when block operation request @rq from queue @q is merged to another
@@ -221,9 +217,9 @@ DEFINE_EVENT(block_rq, block_rq_issue,
  */
 DEFINE_EVENT(block_rq, block_rq_merge,

-	TP_PROTO(struct request_queue *q, struct request *rq),
+	TP_PROTO(struct request *rq),

-	TP_ARGS(q, rq)
+	TP_ARGS(rq)
 );

 /**
@@ -491,7 +487,6 @@ TRACE_EVENT(block_bio_remap,

 /**
  * block_rq_remap - map request for a block operation request
- * @q: queue holding the operation
  * @rq: block IO operation request
  * @dev: device for the operation
  * @from: original sector for the operation
@@ -502,10 +497,9 @@ TRACE_EVENT(block_bio_remap,
  */
 TRACE_EVENT(block_rq_remap,

-	TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
-		 sector_t from),
+	TP_PROTO(struct request *rq, dev_t dev, sector_t from),

-	TP_ARGS(q, rq, dev, from),
+	TP_ARGS(rq, dev, from),

 	TP_STRUCT__entry(
 		__field( dev_t, dev )
@@ -795,12 +795,12 @@ static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
 #endif

 static u64
-blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
+blk_trace_request_get_cgid(struct request *rq)
 {
 	if (!rq->bio)
 		return 0;
 	/* Use the first bio */
-	return blk_trace_bio_get_cgid(q, rq->bio);
+	return blk_trace_bio_get_cgid(rq->q, rq->bio);
 }

 /*
@@ -841,40 +841,35 @@ static void blk_add_trace_rq(struct request *rq, int error,
 	rcu_read_unlock();
 }

-static void blk_add_trace_rq_insert(void *ignore,
-				    struct request_queue *q, struct request *rq)
+static void blk_add_trace_rq_insert(void *ignore, struct request *rq)
 {
 	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
-			 blk_trace_request_get_cgid(q, rq));
+			 blk_trace_request_get_cgid(rq));
 }

-static void blk_add_trace_rq_issue(void *ignore,
-				   struct request_queue *q, struct request *rq)
+static void blk_add_trace_rq_issue(void *ignore, struct request *rq)
 {
 	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
-			 blk_trace_request_get_cgid(q, rq));
+			 blk_trace_request_get_cgid(rq));
 }

-static void blk_add_trace_rq_merge(void *ignore,
-				   struct request_queue *q, struct request *rq)
+static void blk_add_trace_rq_merge(void *ignore, struct request *rq)
 {
 	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE,
-			 blk_trace_request_get_cgid(q, rq));
+			 blk_trace_request_get_cgid(rq));
 }

-static void blk_add_trace_rq_requeue(void *ignore,
-				     struct request_queue *q,
-				     struct request *rq)
+static void blk_add_trace_rq_requeue(void *ignore, struct request *rq)
 {
 	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
-			 blk_trace_request_get_cgid(q, rq));
+			 blk_trace_request_get_cgid(rq));
 }

 static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
 				      int error, unsigned int nr_bytes)
 {
 	blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
-			 blk_trace_request_get_cgid(rq->q, rq));
+			 blk_trace_request_get_cgid(rq));
 }

 /**
@@ -1037,16 +1032,14 @@ static void blk_add_trace_bio_remap(void *ignore, struct bio *bio, dev_t dev,
  * Add a trace for that action.
  *
  **/
-static void blk_add_trace_rq_remap(void *ignore,
-				   struct request_queue *q,
-				   struct request *rq, dev_t dev,
+static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev,
 				   sector_t from)
 {
 	struct blk_trace *bt;
 	struct blk_io_trace_remap r;

 	rcu_read_lock();
-	bt = rcu_dereference(q->blk_trace);
+	bt = rcu_dereference(rq->q->blk_trace);
 	if (likely(!bt)) {
 		rcu_read_unlock();
 		return;
@@ -1058,13 +1051,12 @@ static void blk_add_trace_rq_remap(void *ignore,

 	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
 			rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
-			sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
+			sizeof(r), &r, blk_trace_request_get_cgid(rq));
 	rcu_read_unlock();
 }

 /**
  * blk_add_driver_data - Add binary message with driver-specific data
- * @q: queue the io is for
  * @rq: io request
  * @data: driver-specific data
  * @len: length of driver-specific data
@@ -1073,14 +1065,12 @@ static void blk_add_trace_rq_remap(void *ignore,
  * Some drivers might want to write driver-specific data per request.
  *
  **/
-void blk_add_driver_data(struct request_queue *q,
-			 struct request *rq,
-			 void *data, size_t len)
+void blk_add_driver_data(struct request *rq, void *data, size_t len)
 {
 	struct blk_trace *bt;

 	rcu_read_lock();
-	bt = rcu_dereference(q->blk_trace);
+	bt = rcu_dereference(rq->q->blk_trace);
 	if (likely(!bt)) {
 		rcu_read_unlock();
 		return;
@@ -1088,7 +1078,7 @@ void blk_add_driver_data(struct request_queue *q,

 	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
 			BLK_TA_DRV_DATA, 0, len, data,
-			blk_trace_request_get_cgid(q, rq));
+			blk_trace_request_get_cgid(rq));
 	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(blk_add_driver_data);