io_uring: Add to traces the req pointer when available
The req pointer uniquely identifies a specific request. Having it in traces can provide valuable insights that are not possible to have if the calling process is reusing the same user_data value.

Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Olivier Langlois <olivier@trillion01.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 236daeae36
parent 2335f6f5dd
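Why user_data alone can be ambiguous: a submitter is free to queue several operations with the same user_data value, and without the req pointer such requests are indistinguishable in the traces. Below is a minimal userspace sketch illustrating that reuse; it is a hypothetical example, not part of this commit, and assumes liburing is installed.

/*
 * Hypothetical example (not part of this commit): submit two no-op
 * requests that deliberately reuse the same user_data value. In the
 * io_uring_submit_sqe trace, only the added req pointer can tell the
 * two requests apart.
 *
 * Build with: gcc -o reuse reuse.c -luring
 */
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	int i;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	for (i = 0; i < 2; i++) {
		struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

		io_uring_prep_nop(sqe);
		/* both requests carry the same user_data */
		sqe->user_data = 0xdeadbeef;
	}
	io_uring_submit(&ring);

	for (i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(&ring, &cqe) < 0)
			break;
		/* both completions report user_data 0xdeadbeef */
		printf("cqe user_data=0x%llx res=%d\n",
		       (unsigned long long)cqe->user_data, cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}

With this commit applied, the two io_uring_submit_sqe events emitted for the program above still share data 0xdeadbeef but carry distinct req pointers.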
fs/io_uring.c
@@ -5073,7 +5073,7 @@ static void io_async_task_func(struct callback_head *cb)
 	struct async_poll *apoll = req->apoll;
 	struct io_ring_ctx *ctx = req->ctx;
 
-	trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
+	trace_io_uring_task_run(req->ctx, req, req->opcode, req->user_data);
 
 	if (io_poll_rewait(req, &apoll->poll)) {
 		spin_unlock_irq(&ctx->completion_lock);
@@ -5206,8 +5206,8 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
 		return false;
 	}
 	spin_unlock_irq(&ctx->completion_lock);
-	trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
-					apoll->poll.events);
+	trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data,
+				mask, apoll->poll.events);
 	return true;
 }
 
@@ -6604,8 +6604,9 @@ fail_req:
 			goto fail_req;
 
 		/* don't need @sqe from now on */
-		trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
-					true, ctx->flags & IORING_SETUP_SQPOLL);
+		trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data,
+					req->flags, true,
+					ctx->flags & IORING_SETUP_SQPOLL);
 
 	/*
 	 * If we already have a head request, queue this one for async
include/trace/events/io_uring.h
@@ -323,8 +323,10 @@ TRACE_EVENT(io_uring_complete,
  * io_uring_submit_sqe - called before submitting one SQE
  *
  * @ctx:		pointer to a ring context structure
+ * @req:		pointer to a submitted request
  * @opcode:		opcode of request
  * @user_data:		user data associated with the request
+ * @flags		request flags
  * @force_nonblock:	whether a context blocking or not
  * @sq_thread:		true if sq_thread has submitted this SQE
  *
@@ -333,41 +335,60 @@ TRACE_EVENT(io_uring_complete,
  */
 TRACE_EVENT(io_uring_submit_sqe,
 
-	TP_PROTO(void *ctx, u8 opcode, u64 user_data, bool force_nonblock,
-		 bool sq_thread),
+	TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data, u32 flags,
+		 bool force_nonblock, bool sq_thread),
 
-	TP_ARGS(ctx, opcode, user_data, force_nonblock, sq_thread),
+	TP_ARGS(ctx, req, opcode, user_data, flags, force_nonblock, sq_thread),
 
 	TP_STRUCT__entry (
 		__field(  void *,	ctx		)
+		__field(  void *,	req		)
 		__field(  u8,		opcode		)
 		__field(  u64,		user_data	)
+		__field(  u32,		flags		)
 		__field(  bool,		force_nonblock	)
 		__field(  bool,		sq_thread	)
 	),
 
 	TP_fast_assign(
 		__entry->ctx		= ctx;
+		__entry->req		= req;
 		__entry->opcode		= opcode;
 		__entry->user_data	= user_data;
+		__entry->flags		= flags;
 		__entry->force_nonblock	= force_nonblock;
 		__entry->sq_thread	= sq_thread;
 	),
 
-	TP_printk("ring %p, op %d, data 0x%llx, non block %d, sq_thread %d",
-			  __entry->ctx, __entry->opcode,
-			  (unsigned long long) __entry->user_data,
-			  __entry->force_nonblock, __entry->sq_thread)
+	TP_printk("ring %p, req %p, op %d, data 0x%llx, flags %u, "
+		  "non block %d, sq_thread %d", __entry->ctx, __entry->req,
+		  __entry->opcode, (unsigned long long)__entry->user_data,
+		  __entry->flags, __entry->force_nonblock, __entry->sq_thread)
 );
 
+/*
+ * io_uring_poll_arm - called after arming a poll wait if successful
+ *
+ * @ctx:		pointer to a ring context structure
+ * @req:		pointer to the armed request
+ * @opcode:		opcode of request
+ * @user_data:		user data associated with the request
+ * @mask:		request poll events mask
+ * @events:		registered events of interest
+ *
+ * Allows to track which fds are waiting for and what are the events of
+ * interest.
+ */
 TRACE_EVENT(io_uring_poll_arm,
 
-	TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask, int events),
+	TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data,
+		 int mask, int events),
 
-	TP_ARGS(ctx, opcode, user_data, mask, events),
+	TP_ARGS(ctx, req, opcode, user_data, mask, events),
 
 	TP_STRUCT__entry (
 		__field(  void *,	ctx		)
+		__field(  void *,	req		)
 		__field(  u8,		opcode		)
 		__field(  u64,		user_data	)
 		__field(  int,		mask		)
@@ -376,16 +397,17 @@ TRACE_EVENT(io_uring_poll_arm,
 
 	TP_fast_assign(
 		__entry->ctx		= ctx;
+		__entry->req		= req;
 		__entry->opcode		= opcode;
 		__entry->user_data	= user_data;
 		__entry->mask		= mask;
 		__entry->events		= events;
 	),
 
-	TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x, events 0x%x",
-			  __entry->ctx, __entry->opcode,
+	TP_printk("ring %p, req %p, op %d, data 0x%llx, mask 0x%x, events 0x%x",
+			  __entry->ctx, __entry->req, __entry->opcode,
 			  (unsigned long long) __entry->user_data,
 			  __entry->mask, __entry->events)
 );
 
 TRACE_EVENT(io_uring_poll_wake,
@@ -440,27 +462,40 @@ TRACE_EVENT(io_uring_task_add,
 			  __entry->mask)
 );
 
+/*
+ * io_uring_task_run - called when task_work_run() executes the poll events
+ *                     notification callbacks
+ *
+ * @ctx:		pointer to a ring context structure
+ * @req:		pointer to the armed request
+ * @opcode:		opcode of request
+ * @user_data:		user data associated with the request
+ *
+ * Allows to track when notified poll events are processed
+ */
 TRACE_EVENT(io_uring_task_run,
 
-	TP_PROTO(void *ctx, u8 opcode, u64 user_data),
+	TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data),
 
-	TP_ARGS(ctx, opcode, user_data),
+	TP_ARGS(ctx, req, opcode, user_data),
 
 	TP_STRUCT__entry (
 		__field(  void *,	ctx		)
+		__field(  void *,	req		)
 		__field(  u8,		opcode		)
 		__field(  u64,		user_data	)
 	),
 
 	TP_fast_assign(
 		__entry->ctx		= ctx;
+		__entry->req		= req;
 		__entry->opcode		= opcode;
 		__entry->user_data	= user_data;
 	),
 
-	TP_printk("ring %p, op %d, data 0x%llx",
-			  __entry->ctx, __entry->opcode,
+	TP_printk("ring %p, req %p, op %d, data 0x%llx",
+			  __entry->ctx, __entry->req, __entry->opcode,
 			  (unsigned long long) __entry->user_data)
 );
 
 #endif /* _TRACE_IO_URING_H */
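For completeness, here is a hedged sketch of a trace consumer that picks up the new field: it enables the io_uring_submit_sqe tracepoint through tracefs and streams trace_pipe, where each event line now includes the "req %p" value printed by the TP_printk format above. This is an illustration only, not part of the commit; it assumes tracefs is mounted at /sys/kernel/tracing and the process runs as root.

/*
 * Hypothetical tracing consumer (not part of this commit): enable the
 * io_uring_submit_sqe tracepoint and stream the trace output.
 * Assumes tracefs at /sys/kernel/tracing and root privileges.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *enable =
		"/sys/kernel/tracing/events/io_uring/io_uring_submit_sqe/enable";
	char buf[4096];
	ssize_t n;
	int fd;

	fd = open(enable, O_WRONLY);
	if (fd < 0) {
		perror("open enable");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write enable");
	close(fd);

	/* trace_pipe blocks until trace data is available */
	fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
	if (fd < 0) {
		perror("open trace_pipe");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		/* lines look like: "... io_uring_submit_sqe: ring ..., req 0x..., op 0, data 0x..." */
		fputs(buf, stdout);
	}
	close(fd);
	return 0;
}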